I tried to translate it.
I don't have much experience with pointers.
Please help with the TODOs
and typos.
Code: Select all
'' bluatigro 18 okt 2017
'' neural net from c++ :
''http://www.learnartificialneuralnetworks.com/
''neural-network-software/backpropagation-source-code/
'' one neuron : weighted inputs plus a bias ( "gain" ) input , sigmoid output
'' ( "uitput" is Dutch for output )
type t_neuron
public :
'' one weight per input connection ; memory claimed by create()
dim as double ptr weights
'' previous weight change per input , used as the momentum term in train()
dim as double ptr dvalues
'' the neuron's activation , written by t_layer.calculate()
dim as double uitput
'' the bias input value ( multiplied by wgain in calculate() )
dim as double gain
'' the weight on the bias input
dim as double wgain
declare constructor()
declare destructor()
declare sub create( inputmax as ulong )
end type
'' start empty : no memory claimed yet , create() does the allocation .
'' gain is the constant bias input ( 1 ) that calculate() multiplies by wgain .
constructor t_neuron()
	weights = 0
	dvalues = 0
	uitput = 0
	gain = 1
	wgain = 0
end constructor
'' free the memory claimed in create()
destructor t_neuron()
	if weights then
		deallocate( weights )
		weights = 0
	end if
	if dvalues then
		deallocate( dvalues )
		dvalues = 0
	end if
end destructor
'' claim memory for inputmax connections :
'' random start weights , zeroed momentum values , random bias weight
sub t_neuron.create( inputmax as ulong )
	dim as integer i , n = inputmax
	weights = callocate( n * sizeof( double ) )
	'' callocate zero-fills , so all dvalues ( momentum terms ) start at 0
	dvalues = callocate( n * sizeof( double ) )
	for i = 0 to n - 1
		weights[ i ] = rnd
	next i
	gain = 1
	wgain = rnd
end sub
'' one fully connected layer : a row of neurons that all read one input vector
type t_layer
public :
'' array of pointers to this layer's neurons ; memory claimed by create()
dim as t_neuron ptr ptr neurons
'' number of neurons in this layer
dim as ulong neuroncount
'' the input vector the neurons read from ; filled by the owning net
dim as double ptr layerinput
'' number of values in layerinput
dim as ulong inputcount
declare constructor()
declare destructor()
declare sub create( inputsize as ulong , neuronmax as ulong )
declare sub calculate()
end type
'' start empty : create() claims the memory and sets the counts
constructor t_layer()
	neurons = 0
	neuroncount = 0
	layerinput = 0
	inputcount = 0
end constructor
'' free the memory claimed in create() : every neuron , then the two arrays
destructor t_layer()
	dim as integer i , n = neuroncount
	if neurons then
		for i = 0 to n - 1
			if neurons[ i ] then delete neurons[ i ]
		next i
		deallocate( neurons )
		neurons = 0
	end if
	if layerinput then
		deallocate( layerinput )
		layerinput = 0
	end if
end destructor
'' claim memory for the input vector and for neuronmax neurons ,
'' each neuron getting inputmax connections
sub t_layer.create( inputmax as ulong , neuronmax as ulong )
	dim as integer i , n = neuronmax
	inputcount = inputmax
	neuroncount = neuronmax
	'' callocate zero-fills the input vector
	layerinput = callocate( inputmax * sizeof( double ) )
	neurons = callocate( neuronmax * sizeof( t_neuron ptr ) )
	for i = 0 to n - 1
		neurons[ i ] = new t_neuron
		neurons[ i ]->create( inputmax )
	next i
end sub
'' feed-forward pass for this layer :
'' uitput_i = sigmoid( sum_j( weights_ij * input_j ) + wgain_i * gain_i )
sub t_layer.calculate()
	dim as integer i , j
	dim as double sum
	for i = 0 to neuroncount - 1
		'' sum of input * weight over all connections
		sum = 0
		for j = 0 to inputcount - 1
			sum += neurons[ i ]->weights[ j ] * layerinput[ j ]
		next j
		'' the bias ( gain ) term is added once per neuron ,
		'' not once per input as the first translation did
		sum += neurons[ i ]->wgain * neurons[ i ]->gain
		'' sigmoidal activation function , applied once per neuron
		neurons[ i ]->uitput = 1.0 / ( 1.0 + exp( -sum ) )
	next i
end sub
'' the whole network : an input layer , optional hidden layers , an output layer
type t_net
private :
	dim as t_layer inputlayer
	dim as t_layer outputlayer
	'' array of pointers to the hidden layers ; memory claimed by create()
	dim as t_layer ptr ptr hiddenlayers
	dim as ulong hiddenlayercount
public :
	declare constructor()
	declare destructor()
	'' hiddensizes points at hiddencount ulongs : the size of each hidden layer
	declare sub create( inputmax as ulong , uitputmax as ulong _
		, hiddensizes as ulong ptr , hiddencount as ulong )
	'' forward pass : copy in() through all layers
	declare sub propagate( in as double ptr )
	'' one backpropagation step ; returns the quadratic error / 2
	declare function train( wished as double ptr _
		, in as double ptr , a as double , m as double ) as double
	'' read output neuron i's activation
	declare function uit( i as ulong ) as double
end type
'' start with no hidden layers so the destructor never frees garbage
constructor t_net()
	hiddenlayers = 0
	hiddenlayercount = 0
end constructor
'' free the hidden layers claimed in create() ;
'' inputlayer and outputlayer clean themselves up via t_layer's destructor
destructor t_net()
	dim as integer i , n = hiddenlayercount
	if hiddenlayers then
		for i = 0 to n - 1
			if hiddenlayers[ i ] then delete hiddenlayers[ i ]
		next i
		deallocate( hiddenlayers )
		hiddenlayers = 0
	end if
end destructor
'' build the network :
''   inputmax     : number of input values ( input layer gets one neuron each )
''   uitputmax    : number of output neurons
''   hiddensizes  : pointer to hiddencount ulongs , one size per hidden layer
''   hiddencount  : number of hidden layers ( may be 0 )
sub t_net.create( inputmax as ulong , uitputmax as ulong _
	, hiddensizes as ulong ptr , hiddencount as ulong )
	dim as integer i , n = hiddencount
	dim as ulong prevsize = inputmax
	hiddenlayercount = hiddencount
	'' the input layer maps inputmax values onto inputmax neurons
	inputlayer.create( inputmax , inputmax )
	if n > 0 then
		hiddenlayers = callocate( n * sizeof( t_layer ptr ) )
		for i = 0 to n - 1
			'' each layer's input size is the previous layer's neuron count
			hiddenlayers[ i ] = new t_layer
			hiddenlayers[ i ]->create( prevsize , hiddensizes[ i ] )
			prevsize = hiddensizes[ i ]
		next i
	end if
	outputlayer.create( prevsize , uitputmax )
end sub
'' forward pass : input layer -> hidden layers -> output layer .
'' in must point at ( at least ) inputlayer.inputcount doubles .
'' the old calls to the never-defined update() are replaced by
'' copying each layer's outputs into the next layer's input vector inline .
sub t_net.propagate( in as double ptr )
	dim as integer i , j , hcount = hiddenlayercount
	'' prev tracks the layer whose outputs feed the next layer
	dim as t_layer ptr prev = @inputlayer
	'' first copy the caller's input vector to the input layer
	for j = 0 to inputlayer.inputcount - 1
		inputlayer.layerinput[ j ] = in[ j ]
	next j
	inputlayer.calculate()
	'' hidden layers , if any : feed forward one by one
	for i = 0 to hcount - 1
		for j = 0 to hiddenlayers[ i ]->inputcount - 1
			hiddenlayers[ i ]->layerinput[ j ] = prev->neurons[ j ]->uitput
		next j
		hiddenlayers[ i ]->calculate()
		prev = hiddenlayers[ i ]
	next i
	'' the final stage : the output layer
	for j = 0 to outputlayer.inputcount - 1
		outputlayer.layerinput[ j ] = prev->neurons[ j ]->uitput
	next j
	outputlayer.calculate()
end sub
'' Main training function . Run this in a loop , once per pattern :
''   wished : desired output vector ( outputlayer.neuroncount doubles )
''   in     : input vector ( inputlayer.inputcount doubles )
''   a      : learning rate , m : momentum
'' Returns the general quadratic error divided by 2 .
'' Backpropagation walks from the output layer back to the input layer .
function t_net.train( wished as double ptr _
	, in as double ptr , a as double , m as double ) as double
	dim as integer i , j , k , hcount = hiddenlayercount
	dim as double errorg = 0        '' general quadratic error
	dim as double errorc            '' local error of one neuron
	dim as double sum = 0 , csum = 0
	dim as double delta , udelta
	dim as double uitput
	'' first propagate the input so every neuron holds its activation
	propagate( in )
	'' --- output layer ---
	for i = 0 to outputlayer.neuroncount - 1
		uitput = outputlayer.neurons[ i ]->uitput
		'' error times the sigmoid derivative : out * ( 1 - out )
		errorc = ( wished[ i ] - uitput ) * uitput * ( 1 - uitput )
		'' quadratic error : squared difference of desired and actual output
		errorg += ( wished[ i ] - uitput ) ^ 2
		for j = 0 to outputlayer.inputcount - 1
			'' learning step plus momentum on the previous weight change
			delta = outputlayer.neurons[ i ]->dvalues[ j ]
			udelta = a * errorc * outputlayer.layerinput[ j ] + delta * m
			outputlayer.neurons[ i ]->weights[ j ] += udelta
			outputlayer.neurons[ i ]->dvalues[ j ] = udelta
			'' collected so the error can propagate to the layer below
			sum += outputlayer.neurons[ i ]->weights[ j ] * errorc
		next j
		'' update the bias ( gain ) weight
		outputlayer.neurons[ i ]->wgain += a * errorc * outputlayer.neurons[ i ]->gain
	next i
	'' --- hidden layers , last to first ---
	for i = hcount - 1 to 0 step -1
		for j = 0 to hiddenlayers[ i ]->neuroncount - 1
			uitput = hiddenlayers[ i ]->neurons[ j ]->uitput
			errorc = uitput * ( 1 - uitput ) * sum
			for k = 0 to hiddenlayers[ i ]->inputcount - 1
				delta = hiddenlayers[ i ]->neurons[ j ]->dvalues[ k ]
				udelta = a * errorc * hiddenlayers[ i ]->layerinput[ k ] + delta * m
				hiddenlayers[ i ]->neurons[ j ]->weights[ k ] += udelta
				hiddenlayers[ i ]->neurons[ j ]->dvalues[ k ] = udelta
				'' needed for the next ( lower ) layer
				csum += hiddenlayers[ i ]->neurons[ j ]->weights[ k ] * errorc
			next k
			hiddenlayers[ i ]->neurons[ j ]->wgain += a * errorc * hiddenlayers[ i ]->neurons[ j ]->gain
		next j
		sum = csum
		csum = 0
	next i
	'' --- and finally the input layer ---
	for i = 0 to inputlayer.neuroncount - 1
		uitput = inputlayer.neurons[ i ]->uitput
		errorc = uitput * ( 1 - uitput ) * sum
		for j = 0 to inputlayer.inputcount - 1
			delta = inputlayer.neurons[ i ]->dvalues[ j ]
			udelta = a * errorc * inputlayer.layerinput[ j ] + delta * m
			inputlayer.neurons[ i ]->weights[ j ] += udelta
			inputlayer.neurons[ i ]->dvalues[ j ] = udelta
		next j
		inputlayer.neurons[ i ]->wgain += a * errorc * inputlayer.neurons[ i ]->gain
	next i
	'' return the general error divided by 2
	return errorg / 2
end function
'' read the activation of output neuron o
'' ( the old header lacked the method name , and uitput lives on the
''   neuron , not on the layer itself )
function t_net.uit( o as ulong ) as double
	return outputlayer.neurons[ o ]->uitput
end function
'' uniform random integer in the inclusive range [ l , h ]
function irange( l as integer , h as integer ) as integer
	dim as integer span = h - l + 1
	return l + int( rnd * span )
end function
'' demo : teach the net to tell a filled square from a filled circle
screen 20 , 32
dim as t_net net
'' hidden layer sizes must be passed as a pointer to an array of ulongs
dim as ulong hiddensize( 2 ) = { 3 , 3 , 3 }
net.create( 64 * 64 , 1 , @hiddensize( 0 ) , 3 )
dim as double punt( 64 * 64 )
'' train() wants the desired output as a double ptr , so use a 1-element array
dim as double wished( 0 )
dim as integer tel , issquare , x , y , r
while tel < 1000
	'' wipe the previous shape so the patterns do not pile up on screen
	cls
	x = irange( 10 , 54 )
	y = irange( 10 , 54 )
	r = 10
	issquare = irange( 0 , 1 )
	if issquare then
		line ( x - r , y - r ) - ( x + r , y + r ) , rgb( 255 , 255 , 255 ) , bf
	else
		circle ( x , y ) , r , rgb( 255 , 255 , 255 ) , , , , f
	end if
	'' sample the 64 x 64 corner into the input vector ;
	'' loop to 63 : index x + y * 64 must stay inside 0 .. 64 * 64 - 1
	for x = 0 to 63
		for y = 0 to 63
			'' compare against white : in 32-bit mode an unset pixel
			'' is not guaranteed to read back as numeric 0
			punt( x + y * 64 ) = iif( point( x , y ) = rgb( 255 , 255 , 255 ) , 1.0 , 0.0 )
		next y
	next x
	wished( 0 ) = issquare
	print net.train( @wished( 0 ) , @punt( 0 ) , .5 , .1 )
	'' the old loop never incremented tel and so never ended
	tel += 1
wend
sleep