neural net try

General FreeBASIC programming questions.
bluatigro
Posts: 559
Joined: Apr 25, 2012 10:35
Location: netherlands

neural net try

Postby bluatigro » Jul 04, 2018 13:51

i fount some c++ code about this
i tryed to translate it

i do not get anything
what did i not good

Code: Select all

'' http://computing.dcu.ie/~humphrys/Notes/Neural/Code/index.html
'' input I[i] = any real numbers ("doubles" in C++)
'' y[j]
'' network output y[k] = sigmoid continuous 0 to 1
'' correct output O[k] = continuous 0 to 1

'' assumes throughout that all i are linked to all j, and that all j are linked to all k
'' if want some NOT to be connected, will need to introduce:
''   Boolean connected [ TOTAL ] [ TOTAL ];
'' initialise it, and then keep checking:
''   if (connected[i][j])
'' don't really need to do this,
'' since we can LEARN a weight of 0 on this link

function sigmoid( x as double ) as double
  return 1.0 / (1 + exp( -x ) )
end function

''test data
const as integer NOINPUT  = 1
const as integer NOHIDDEN = 30
const as integer NOOUTPUT = 1

const as double RATE = 0.3
const as double C = 0.1 '' start w's in range -C, C

const as integer TOTAL = NOINPUT + NOHIDDEN + NOOUTPUT

'' units all unique ids - so no ambiguity about which we refer to:

const as integer loi = 0
const as integer hii = NOINPUT - 1
const as integer loj = NOINPUT
const as integer hij = NOINPUT + NOHIDDEN - 1
const as integer lok = NOINPUT + NOHIDDEN
const as integer hik = NOINPUT + NOHIDDEN + NOOUTPUT - 1

'' input I[i] = any real numbers ("doubles" in C++)
'' y[j]
'' network output y[k] = sigmoid continuous 0 to 1
'' correct output O[k] = continuous 0 to 1


'' ssumes throughout that all i are linked to all j, and that all j are linked to all k
'' if want some NOT to be connected, will need to introduce:
''   Boolean connected [ TOTAL ] [ TOTAL ];
'' initialise it, and then keep checking:
''   if (connected[i][j])
'' don't really need to do this,
'' since we can LEARN a weight of 0 on this lin

type NeuralNetwork
  dim as integer i , j , k
  dim as double in( TOTAL )           
  dim as double y( TOTAL )           
  dim as double O( TOTAL )   
  dim as double w( TOTAL , TOTAL )  '' w[i][j]
  dim as double wt( TOTAL )         '' bias weights wt[i]
  dim as double dx( TOTAL )         '' dE/dx[i]
  dim as double dy( TOTAL )         '' dE/dy[i]
  declare sub backpropagate()
  declare sub exploit()
  declare sub forwardpass()
  declare sub init()
  declare sub learn( m as integer )
  declare sub newIO()
  declare sub reportIO()
end type

''How Input is passed forward through the network:

sub NeuralNetwork.forwardpass()
  dim as double x  '' temporary variable - x[i]
  dim as integer i , j , k
''----- forwardpass I[i] -> y[j] ------------------------------------------------
  for j = loj to hij
    x = 0
    for i = loi to hii       
      x = x + in( i ) * w( i , j )
    next i
    y( j ) = sigmoid( x - wt( j ) )
  next j
''----- forwardpass y[j] -> y[k] ------------------------------------------------
  for k = lok to hik
    x = 0
    for j = loj to hij         
      x = x + ( y( j ) * w( j , k ) ) 
      y( k ) = sigmoid( x - wt( k ) )
    next j
  next k
end sub

''Initialisation:

'' going to do w++ and w--
'' so might think should start with all w=0
'' (all y[k]=0.5 - halfway between possibles 0 and 1)
'' in fact, if all w same they tend to march in step together
'' need *asymmetry* if want them to specialise (form a representation scheme)
'' best to start with diverse w
''
'' also, large positive or negative w -> slow learning
'' so start with small absolute w -> fast learning

function range( l as double , h as double ) as double
  return rnd * ( h - l ) + l
end function

sub NeuralNetwork.init()
  dim as integer visits = 0 , i , j , k
  for i = loi to hii
    for j = loj to hij
      w( i , j ) = range( -c , c )
    next j
  next i
  for j = loj to hij
    for k = lok to hik
      w( j , k ) = range( -c , c )
    next k
  next j
  for j = loj to hij
    wt( j ) = range( -c , c )
  next j
  for k = lok to hik
    wt( k ) = range( -c , c )
  next k
end sub

''How Error is back-propagated through the network:


sub NeuralNetwork.backpropagate()
  dim as double dw '' temporary variable - dE/dw[i][j]
  dim as integer i , j , k
''----- backpropagate O[k] -> dy[k] -> dx[k] -> w[j][k],wt[k] ---------------------------------
  for k = lok to hik
    dy( k ) = y( k ) - O( k )
    dx( k ) = ( dy( k ) ) * y( k ) * ( 1- y( k ) )
  next k
''----- backpropagate dx[k],w[j][k] -> dy[j] -> dx[j] -> w[i][j],wt[j] ------------------------
''----- use OLD w values here (that's what the equations refer to) .. -------------------------
  dim as double t
  for j = loj to hij
     t = 0
    for k = lok to hik
      t = t + ( dx( k ) * w( j , k ) )
    next k
    dy( j ) = t
    dx( j ) = dy( j ) * y( j ) * ( 1 - y( j ) )
  next j
''----- .. do all w changes together at end ---------------------------------------------------
  for j = loj to hij
    for k = lok to hik
      dw = dx( k ) * y( j )           
      w( j , k ) = w( j , k ) - ( RATE * dw )
    next k
  next j
  for i = loi to hii
    for j = loj to hij
      dw = dx( j ) * in( i )           
      w( i , j ) = w( i , j ) - ( RATE * dw )
    next j
  next i
  for k = lok to hik
    dw = -dx( k )         
    wt( k ) = wt( k ) - ( RATE * dw )
  next k
  for j = loj to hij
    dw = -dx( j )
    wt( j ) = wt( j ) - ( RATE * dw )
  next j
end sub

''Ways of using the Network:

sub NeuralNetwork.learn( CEILING as integer )
  dim as integer m
  for m =1 to CEILING
    newIO
    '' new I/O pair
    '' put I into I[i]
    '' put O into O[k]
    forwardpass
    backpropagate
  next m
end sub

sub NeuralNetwork.exploit()
  dim as integer m
  for m =1 to 30
    newIO
    forwardpass
  next m
end sub

dim as NeuralNetwork net

'' input x
'' output y
'' adjust difference between y and f(x)

function f( x as double ) as double
'' return sqr( x )
  return sin( x )
'' return sin( x ) + sin( 2 * x ) + sin( 5 * x ) + cos( x )
end function

'' I = x = double lox to hix
const as double lox = 0
const as double hix = 9

'' want it to store f(x) = double lof to hif
const as double lofc = -2.5 '' approximate bounds
const as double hifc = 3.2

'' O = f(x) normalised to range 0 to 1

function normalise( t as double ) as double
  return ( t - lofc ) / ( hifc - lofc )
end function

function expand( t as double ) as double '' goes the other way
  return lofc + t * ( hifc - lofc )
end function

sub NeuralNetwork.newIO()
  dim as double x = range( lox , hix )
  dim as integer i , j , k
''there is only one, just don't want to remember number:
  for i = loi to hii
    in( i ) = x
  next i
'' there is only one, just don't want to remember number:
  for k = lok to hik                 
    O( k ) = normalise( f( x ) )     
  next k
end sub

'' Note it never even sees the same exemplar twice!

sub NeuralNetwork.reportIO()
  dim as double xx , yy
  dim as integer i , j , k
  for i = loi to hii
    xx = in( i )
  next i
  for k = lok to hik
    yy = expand( y( k ) )
  next k
  print "x    = ",    xx 
  print "y    = ",    yy
  print "f(x) = ", f( xx )
end sub

net.init
net.learn 1000
net.exploit

sleep
bluatigro
Posts: 559
Joined: Apr 25, 2012 10:35
Location: netherlands

Re: neural net try

Postby bluatigro » Aug 10, 2018 13:54

update :
tryed it whit a genetic algoritm

error :
no results jet

REM :
the NN is trying to gues if it is a square or a circle

Code: Select all

'' bluatigro 10 aug 2018

const as long factor = 2 ^ 30
screen 20 , 32
type NNet
public :
  dim as double in( 0 )
  dim as integer inmax
  dim as double wih( 0 , 0 )
  dim as double h( 0 , 0 )
  dim as integer hidcelmax
  dim as integer hidlayermax
  dim as double whh( 0 , 0 , 0 )
  dim as double whu( 0 , 0 )
  dim as double uit( 0 )
  dim as integer uitmax
  declare sub init( i as integer , hc as integer , hl as integer , u as integer )
  declare sub calculate()
  declare sub mutate( r as double )
end type
sub NNet.init( i as integer , hc as integer , hl as integer , u as integer )
  inmax = i
  hidcelmax = hc
  hidlayermax = hl
  uitmax = u
  redim as double in( inmax ) , h( hidlayermax , hidcelmax ) , uit( uitmax )
  redim as double whi( inmax , hidcelmax )
  for i = 0 to inmax
    for hc = 0 to hidcelmax
      wih( i , hc ) = rnd
    next hc
  next i
  redim as double whh( hidlayermax , hidcelmax , hidcelmax )
  for i = 0 to hidlayermax
    for hl = 0 to hidcelmax
      for hc = 0 to hidcelmax
        whh( i , hl , hc ) = rnd
      next hc
    next hl
  next i
  redim as double whu( hidcelmax , uitmax )
  for i = 0 to uitmax
    for hc = 0 to hidcelmax
      whu( hc , i ) = rnd
    next hc
  next i
end sub
function signoid( x as double ) as double
  return 1 / ( 1 + exp( -x ) )
end function
sub NNet.calculate()
  dim as integer i , hid , cel , u
  dim as double sum
  for cel = 0 to hidcelmax
    sum = 0.0
    for i = 0 to inmax
      sum += in( i ) * wih( i , cel )
    next i
    h( 0 , cel ) = signoid( sum ) / inmax
  next cel
  for i = 1 to hidlayermax
    for cel = 0 to hidcelmax
      sum = 0.0
      for hid = 1 to hidcelmax
        sum += h( i - 1 , hid ) * whh( i - 1 , hid , cel )
      next hid
      h( i , cel ) = signoid( sum ) / hidcelmax
    next cel
  next i
  for u = 0 to uitmax
    sum = 0.0
    for i = 0 to hidcelmax
      sum += h( hidlayermax , i ) * whu( i , u )
    next i
  next u
end sub
function verander( x as double ) as double
  dim as long i
  i = clng( x * factor )
  i = i xor 2 ^ int( rnd * 30 )
  return cdbl( i ) / factor
end function
sub NNet.mutate( radiation as double )
  dim as integer i , hc , hl , u
  for i = 0 to inmax
    for hc = 0 to hidcelmax
      wih( i , hc ) = iif( rnd < radiation _
      , verander( wih( i , hc ) ) , wih( i , hc ) )
    next hc
  next i
  for i = 0 to hidlayermax
    for hl = 0 to hidcelmax
      for hc = 0 to hidcelmax
        whh( i , hl , hc ) = iif( rnd < radiation _
        , verander( whh( i , hl , hc ) ) , whh( i , hl , hc ) )
      next hc
    next hl
  next i
  for i = 0 to uitmax
    for hc = 0 to hidcelmax
      whu( hc , i ) = iif( rnd < radiation _
      , verander( whu( hc , i ) ) , whu( hc , i ) )
    next hc
  next i
end sub
function mix( a as NNet , b as NNet ) as NNet
  dim as NNet uit
  uit.init a.inmax , a.hidcelmax , a.hidlayermax , a.uitmax
  dim as integer i , hc , hl , u
  for i = 0 to uit.inmax
    for hc = 0 to uit.hidcelmax
      uit.wih( i , hc ) = iif( rnd < .5 _
      , a.wih( i , hc ) , b.wih( i , hc ) )
    next hc
  next i
  for i = 0 to uit.hidlayermax
    for hl = 0 to uit.hidcelmax
      for hc = 0 to uit.hidcelmax
        uit.whh( i , hl , hc ) = iif( rnd < .5 _
        , a.whh( i , hl , hc ) , b.whh( i , hl , hc ) )
      next hc
    next hl
  next i
  for i = 0 to uit.uitmax
    for hc = 0 to uit.hidcelmax
      uit.whu( hc , i ) = iif( rnd < .5 _
      , a.whu( hc , i ) , b.whu( hc , i ) )
    next hc
  next i
  return uit
end function
function pixel( x as integer , y as integer ) as double
  return iif( point( x , y ) <> -16777216 , 1.0 , 0.0 )
end function
function range( l as integer , h as integer ) as integer
  return int( rnd * ( h - l + 1 ) + l )
end function
dim as NNet ann( 200 )
dim as integer i
for i = 0 to 200
  ann( i ).init 100 * 100 , 100 , 1 , 1
next i

function rect() as double
  cls
  dim as integer x , y , d
  d = range( 10 , 30 )
  x = range( d , 100 - d )
  y = range( d , 100 - d )
  if rnd < .5 then 
    circle ( x , y ) , d , &hffffff ,,,, f
    return 0.0
  end if
  line ( x - d , y - d ) - ( x + d , y + d ) , &hffffff , bf
  return 1.0
end function

dim as integer n , g , x , y , h , l , ry( 200 )
dim as double fout( 200 ) , try

for i = 0 to 200
  ry( i ) = i
next i

for g = 0 to 100
  for n = 0 to 200
    fout( n ) = 0.0
    for i = 0 to 100
      try = rect()
      for x = 0 to 100
        for y = 0 to 100
          ann( n ).in( x + 100 * y ) = pixel( x , y )
        next y
      next x
      ann( n ).calculate()
      fout( n ) += abs( try - ann( n ).uit( 0 ) )
    next i
  next n
  for h = 1 to 200
    for l = 0 to h - 1
      if fout( ry( h ) ) < fout( ry( l ) ) then
        swap ry( h ) , ry( l )
      end if
    next l
  next h
  for i = 20 to 200
    x = range( 0 , 20 )
    y = range( 0 , 20 )
    ann( i ) = mix( ann( x ) , ann( y ) )
    ann( i ).mutate 1e-5
  next i
  print g , fout( ry( 0 ) )
next g   
   
   

sleep

Return to “General”

Who is online

Users browsing this forum: No registered users and 4 guests