neural net example font

General FreeBASIC programming questions.
Post Reply
bluatigro
Posts: 660
Joined: Apr 25, 2012 10:35
Location: netherlands

neural net example font

Post by bluatigro »

i only missin the create functions

i need some help tranlating ths to FB

Code: Select all

//bluatigro 5 jun 2017
//neural net from internet :
//http://www.learnartificialneuralnetworks.com/neural-network-software/backpropagation-source-code/

#include <iostream>
using namespace std;
struct neuron
{
    float *weights; // neuron input weights or synaptic connections
    float *deltavalues; //neuron delta values
    float output; //output value
    float gain;//Gain value
    float wgain;//Weight gain value

    neuron();//Constructor
    ~neuron();//Destructor
    void create(int inputcount);//Allocates memory and initializates values
};
struct layer
{
    neuron **neurons;//The array of neurons
    int neuroncount;//The total count of neurons
    float *layerinput;//The layer input
    int inputcount;//The total count of elements in layerinput

    layer();//Object constructor. Initializates all values as 0

    ~layer();//Destructor. Frees the memory used by the layer

    void create(int inputsize, int _neuroncount);//Creates the layer and allocates memory
    void calculate();//Calculates all neurons performing the network formula
};
class bpnet
{
private:
    layer m_inputlayer;//input layer of the network
    layer m_outputlayer;//output layer..contains the result of applying the network
    layer **m_hiddenlayers;//Additional hidden layers
    int m_hiddenlayercount;//the count of additional hidden layers

public:
//function tu create in memory the network structure
    bpnet();//Construction..initialzates all values to 0
    ~bpnet();//Destructor..releases memory
    //Creates the network structure on memory
    void create(int inputcount,int inputneurons,int outputcount,int *hiddenlayers,int hiddenlayercount);

    void propagate(const float *input);//Calculates the network values given an input pattern
    //Updates the weight values of the network given a desired output and applying the backpropagation
    //Algorithm
    float train(const float *desiredoutput,const float *input,float alpha, float momentum);

    //Updates the next layer input values
    void update(int layerindex);

    //Returns the output layer..this is useful to get the output values of the network
    inline layer &getOutput()
    {
        return m_outputlayer;
    }

};
void layer::calculate()
{
    int i,j;
    float sum;
    //Apply the formula for each neuron
    for(i=0;i<neuroncount;i++)
    {
        sum=0;//store the sum of all values here
        for(j=0;j<inputcount;j++)
        {
        //Performing function
            sum+=neurons[i]->weights[j] * layerinput[j]; //apply input * weight
        }
        sum+=neurons[i]->wgain * neurons[i]->gain; //apply the gain or theta multiplied by the gain weight.
        //sigmoidal activation function
        neurons[i]->output= 1.f/(1.f + exp(-sum));//calculate the sigmoid function
    }
}
void bpnet::propagate(const float *input)
{
    //The propagation function should start from the input layer
    //first copy the input vector to the input layer Always make sure the size
    //"array input" has the same size of inputcount
    memcpy(m_inputlayer.layerinput,input,m_inputlayer.inputcount * sizeof(float));
    //now calculate the inputlayer
    m_inputlayer.calculate();

    update(-1);//propagate the inputlayer out values to the next layer
    if(m_hiddenlayers)
    {
        //Calculating hidden layers if any
        for(int i=0;i<m_hiddenlayercount;i++)
        {
            m_hiddenlayers[i]->calculate();
            update(i);
        }
    }

    //calculating the final statge: the output layer
    m_outputlayer.calculate();
}
//Main training function. Run this function in a loop as many times needed per pattern
float bpnet::train(const float *desiredoutput, const float *input, float alpha, float momentum)
{
    //function train, teaches the network to recognize a pattern given a desired output
    float errorg=0; //general quadratic error
    float errorc; //local error;
    float sum=0,csum=0;
    float delta,udelta;
    float output;
    //first we begin by propagating the input
    propagate(input);
    int i,j,k;
    //the backpropagation algorithm starts from the output layer propagating the error  from the output
    //layer to the input layer
    for(i=0;i<m_outputlayer.neuroncount;i++)
    {
        //calculate the error value for the output layer
        output=m_outputlayer.neurons[i]->output; //copy this value to facilitate calculations
        //from the algorithm we can take the error value as
        errorc=(desiredoutput[i] - output) * output * (1 - output);
        //and the general error as the sum of delta values. Where delta is the squared difference
        //of the desired value with the output value
        //quadratic error
        errorg+=(desiredoutput[i] - output) * (desiredoutput[i] - output) ;

        //now we proceed to update the weights of the neuron
        for(j=0;j<m_outputlayer.inputcount;j++)
        {
            //get the current delta value
            delta=m_outputlayer.neurons[i]->deltavalues[j];
            //update the delta value
            udelta=alpha * errorc * m_outputlayer.layerinput[j] + delta * momentum;
            //update the weight values
            m_outputlayer.neurons[i]->weights[j]+=udelta;
            m_outputlayer.neurons[i]->deltavalues[j]=udelta;

            //we need this to propagate to the next layer
            sum+=m_outputlayer.neurons[i]->weights[j] * errorc;
        }

        //calculate the weight gain
        m_outputlayer.neurons[i]->wgain+= alpha * errorc * m_outputlayer.neurons[i]->gain;

    }

    for(i=(m_hiddenlayercount - 1);i>=0;i--)
    {
        for(j=0;j<m_hiddenlayers[i]->neuroncount;j++)
        {
            output=m_hiddenlayers[i]->neurons[j]->output;
            //calculate the error for this layer
            errorc= output * (1-output) * sum;
            //update neuron weights
            for(k=0;k<m_hiddenlayers[i]->inputcount;k++)
            {
                delta=m_hiddenlayers[i]->neurons[j]->deltavalues[k];
                udelta= alpha * errorc * m_hiddenlayers[i]->layerinput[k] + delta * momentum;
                m_hiddenlayers[i]->neurons[j]->weights[k]+=udelta;
                m_hiddenlayers[i]->neurons[j]->deltavalues[k]=udelta;
                csum+=m_hiddenlayers[i]->neurons[j]->weights[k] * errorc;//needed for next layer

            }

            m_hiddenlayers[i]->neurons[j]->wgain+=alpha * errorc * m_hiddenlayers[i]->neurons[j]->gain;

        }
        sum=csum;
        csum=0;
    }

    //and finally process the input layer
    for(i=0;i<m_inputlayer.neuroncount;i++)
    {
        output=m_inputlayer.neurons[i]->output;
        errorc=output * (1 - output) * sum;

        for(j=0;j<m_inputlayer.inputcount;j++)
        {
            delta=m_inputlayer.neurons[i]->deltavalues[j];
            udelta=alpha * errorc * m_inputlayer.layerinput[j] + delta * momentum;
            //update weights
            m_inputlayer.neurons[i]->weights[j]+=udelta;
            m_inputlayer.neurons[i]->deltavalues[j]=udelta;
        }
        //and update the gain weight
        m_inputlayer.neurons[i]->wgain+=alpha * errorc * m_inputlayer.neurons[i]->gain;
    }

    //return the general error divided by 2
    return errorg / 2 ;
}
#define PATTERN_COUNT 4
#define PATTERN_SIZE 2
#define NETWORK_INPUTNEURONS 3
#define NETWORK_OUTPUT 1
#define HIDDEN_LAYERS 0
#define EPOCHS 20000


int main()
{
    //Create some patterns
    //playing with xor
    //XOR input values
    float pattern[PATTERN_COUNT][PATTERN_SIZE]=
    {
        {0,0},
        {0,1},
        {1,0},
        {1,1}
    };

    //XOR desired output values
    float desiredout[PATTERN_COUNT][NETWORK_OUTPUT]=
    {
        {0},
        {1},
        {1},
        {0}
    };


    bpnet net;//Our neural network object
    int i,j;
    float error;
    //We create the network
    net.create(PATTERN_SIZE,NETWORK_INPUTNEURONS,NETWORK_OUTPUT,HIDDEN_LAYERS,HIDDEN_LAYERS);

    //Start the neural network training
    for(i=0;i<EPOCHS;i++)
    {
        error=0;
        for(j=0;j<PATTERN_COUNT;j++)
        {
            error+=net.train(desiredout[j],pattern[j],0.2f,0.1f);
        }
        error/=PATTERN_COUNT;
        //display error
        cout << "ERROR:" << error << "\r";

    }

    //once trained test all patterns

    for(i=0;i<PATTERN_COUNT;i++)
    {

        net.propagate(pattern[i]);

    //display result
        cout << "TESTED PATTERN " << i << " DESIRED OUTPUT: " << *desiredout[i] << " NET RESULT: "<< net.getOutput().neurons[0]->output << endl;
    }


    return 0;
}
i got this far

Code: Select all

''bluatigro 5 jul 2017
''ann
''translated from c++ from :
''http://www.learnartificialneuralnetworks.com/neural-network-software/backpropagation-source-code/

type neuron
  dim as double uitvoer , gain , wgain
  dim as double ptr weights , deltavalues
  declare sub create( i as integer )
  declare constructor ()
  declare destructor ()
end type

type layer
  dim as neuron ptr ptr neurons
  dim as integer neuroncount
  dim as double ptr layerinput
  dim as integer inputcount
  declare sub create( inputsize as integer , numneurons as integer )
  declare sub calculate()
  declare constructor ()
  declare destructor ()
end sub

sub layer.caluculate()
  dim as integer i,j
  dim as double sum
  for i = 0 to neuroncount 
    sum = 0
    for j = 0 to inputcount
      sum += neurons[i].weight[j] * layerinput[j]
    next j
    neurons[i].uitvoer=1/(1+exp(-sum))
  next i
end sub

type bpnet
private :
  dim as layer _inputlayer
  dim as layer _outputlayer
  dim as layer ptr ptr _hiddenlayer
  dim as integer _hiddenlayercount
public :
  declare constructor ()
  declare destructor ()
  declare sub create( in as integer , inn as integer _
  , uit as integer , hidden as integer ptr , hid as integer )
  declare sub propegate( in as double ptr )
  declare function train( wish as double ptr , in as double ptr _
  , a as double , m as double ) as double
  declare sub update( layerindex as integer )
  declare function getoutputlayer() as double ptr
end type

bluatigro
Posts: 660
Joined: Apr 25, 2012 10:35
Location: netherlands

Re: neural net example font

Post by bluatigro »

update :
i translated it first in libertybasic
then in FB

error :
the net don't learn

Code: Select all

''bluatigro 6 jun 2017
''neural net translation from :
''http://www.learnartificialneuralnetworks.com/neural-network-software/backpropagation-source-code/

const as double tosmall = 1e-306
const as integer outputmax = 0
const as integer hiddenmax = 4
const as integer celperlayer = 4

function in(l as integer,c as integer,i as integer)as integer
  return l*celperlayer^2+c*celperlayer+i
end function

dim shared as double cel(hiddenmax,celperlayer)
dim shared as double g(hiddenmax,celperlayer)
dim shared as double wg(hiddenmax,celperlayer)
dim shared as double w(in(hiddenmax,celperlayer,celperlayer))
dim shared as double d(in(hiddenmax,celperlayer,celperlayer))
dim shared as double wish(outputmax)
sub create
  dim as integer i,j,k
  for i = 0 to hiddenmax
    for j = 0 to celperlayer
      for k = 0 to celperlayer
        w(in(i,j,k))=rnd
        d(in(i,j,k))=rnd
      next k
      g(i,j)=rnd
      wg(i,j)=rnd
    next j
  next i
end sub
function signoid( x as double ) as double
  return 1 / ( 1 + exp( 0-x ) )
end function
sub probpegate
  dim as integer layer,celnr,i
  dim as double sum
  for layer = 1 to hiddenmax
    for celnr = 0 to celperlayer
      sum = 0
      for i = 0 to celperlayer
        sum = sum + cel(layer-1,celnr)*w(in(layer,celnr,i))
      next i
      cel(layer,celnr)=signoid(sum)
    next celnr
  next layer
end sub
function train( alpa as double , momentum as double)as double
  probpegate
  dim as integer i,j,k
  dim as double errorc,errorg,uit,delta,udelta,sum,csum
  for i = 0 to outputmax
    uit = cel(hiddenmax,i)
    errorc = ( wish(i) - uit ) * uit * ( 1 - uit )
    errorg = errorg + ( wish(i) - uit ) * ( wish(i) - uit )
    for j = 0 to celperlayer
      delta = d(in(hiddenmax,j,i))
      if delta > tosmall then
        udelta = alpa * errorc * cel(hiddenmax-1,j) _
        + delta * momentum
      else
        udelta=0
      end if
      w(in(hiddenmax,j,i))=w(in(hiddenmax,j,i))+udelta
      d(in(hiddenmax,j,i))=udelta
      sum=sum+w(in(hiddenmax,j,i))
    next j
    wg(hiddenmax,i)=wg(hiddenmax,i)+alpa*errorc*g(hiddenmax,i)
  next i
  for i = hiddenmax - 1 to 1 step -1
    for j = 0 to celperlayer
      uit = cel(i,j)
      errorc = uit * ( 1 - uit ) * sum
      for k = 0 to celperlayer
        delta = d(in(i,j,k))
        'print delta
        if delta > tosmall then
          udelta = alpa * errorc * cel(i-1,k) _
          + delta * momentum
        else
          udelta=0
        end if
        w(in(i,j,k))=w(in(i,j,k))+udelta
        d(in(i,j,k))=udelta
        csum = w(in(i,j,k))*errorc
      next k
      wg(i,j)=wg(i,j)*alpa*errorc
    next j
    sum = csum
    csum = 0
  next i
  return errorg / 2
end function


dim as integer a,b,c,t
dim as double f
create
for t = 0 to 2000
  f = 0
  for a = 0 to 1
    for b = 0 to 1
      for c = 0 to 1
        cel(0,0)=cdbl(a)
        cel(0,1)=cdbl(b)
        cel(0,2)=cdbl(c)
        wish(0)=cdbl(a xor b xor c)
        f = f + train( .1 , .2 )
      next c
    next b
  next a
  print t , f
next t
print "data", "wish" , "net"
for a = 0 to 1
  for b = 0 to 1
    for c = 0 to 1
      cel(0,0)=cdbl(a)
      cel(0,1)=cdbl(b)
      cel(0,2)=cdbl(c)
      probpegate
      print a;b;c , a xor b xor c , cel(hiddenmax,0)
    next c
  next b
next a
print "[ end sim : push return ]"
sleep 
Post Reply