SparseNet

User projects written in or related to FreeBASIC.
dafhi
Posts: 1641
Joined: Jun 04, 2005 9:51

SparseNet

Post by dafhi »

Code: Select all

/' -- SparseNet 2018 August 21 - by dafhi

    A sparse net could speed up computation in a 'pre-pruning'
    sort of way.
    
    Still experimental, there is much to be learned.
  
    - update
    visualization.  much larger code base  :D
 ----------------------------------------------------------- '/


' ---------------------- general ---------------------------
type myint as integer
type float as single

#Ifndef floor   '' http://www.freebasic.net/forum/viewtopic.php?p=118633
#Define floor(x) (((x)*2.0-0.5) shr 1)
#define ceil(x) (-((-(x)*2.0-0.5) shr 1))
#EndIf

function randc(c as ulong, bas as double=0)as long
    return floor(rnd*c + bas)
End Function

#define wfor(v, initial, cond, ste)  v=initial-ste: while cond: v+=ste
' ----------------------------------------------------------


' ---------- visualization ---------------------------------
'
#Macro Alpha256(ret,back, fore, a256) '2017 Mar 26
  ret=((_
  (fore And &Hff00ff) * a256 + _
  (back And &Hff00ff) * (256-a256) + &H800080) And &Hff00ff00 Or (_
  (fore And &H00ff00) * a256 + _
  (back And &H00ff00) * (256-a256) + &H008000) And &H00ff0000) Shr 8
#EndMacro

type imagevars
    
    /' -- a class created for
      .seamless screen & image interaction
      .encapsulate standard metrics
      .convenient additional vars, subs and functions
      .quick reference for ScreenInfo & ImageInfo
      
    -- 2018 Aug 20, by dafhi - '/
    
    as myint              w,h,bpp,bypp,pitch
    as myint              rate, num_pages, flags
    as any ptr            im, pixels
    as string             driver_name
    
    declare constructor   (as any ptr=0)
    declare destructor
    
    declare sub           get_info(as any ptr=0)
    declare sub           create(as short, as short, as ulong=rgb(255,0,255))
    declare sub           cls(as ulong=rgb(0,0,0))
    declare sub           Plot(As long,As long,As Ulong,As float)
    declare sub           wuLine(As float,As float,As float,As float,As Ulong = -1)
    
    as single             wh, hh, diagonal, scale 'helpers
    as myint              wm, hm, pitchBy, ub     '
    as ulong ptr          p32                     '
   
   private:
    declare sub           release
end type

Destructor.imagevars:  release
End Destructor

Sub imagevars.release
  If ImageInfo(im) = 0 Then ImageDestroy im
  im = 0
End Sub

constructor.imagevars(im as any ptr) ' 2018 June 28
  if im=0 then exit constructor
  if ImageInfo(im) = 0 then get_info im
end constructor

sub imagevars.get_info(im as any ptr)
    release
    if im=0 then:  pixels=screenptr
      ScreenInfo w,h, bpp,, pitch, rate, driver_name:  bypp=bpp\8 '2018 Jan 9
    elseif Imageinfo(im)=0 then
      ImageInfo im, w, h, bypp, pitch, pixels
      bpp = bypp * 8:  this.im = im
    endif:  pitchBy=pitch\bypp:  p32=pixels:  ub = w*h-1
    wm=w-1: wh=w/2:  
    hm=h-1: hh=h/2:  diagonal = sqr(wm*wm+hm*hm)
    scale = sqr(w*w+h*h) / sqr(.5) / 2
end sub

sub imagevars.create(w as short, h as short, col as ulong)
    get_info imagecreate(w, h, col)
END SUB

sub imagevars.cls(col as ulong)
    for y as ulong ptr = p32 to @p32[hm * pitchBy] step pitchBy
      for x as ulong ptr = y to y+wm
        *x = col
      Next:  Next
End Sub


/' ------ Xiaolin Wu's line algorithm ----------------------

An algorithm for line antialiasing,
which was presented in the article
An Efficient Antialiasing Technique
in the July 1991 issue of Computer
Graphics, as well as in the article
Fast Antialiasing in the June 1992
issue of Dr. Dobb's Journal.

https://www.freebasic.net/forum/viewtopic.php?f=7&t=24443
 ----------------------------------------------------------- '/

#define _ipart(x) floor(x) ' integer part 
#define _round(x) floor(x + .5) 
#define _fpart(x) Frac(x)    ' fractional part
#Macro  _rfpart(x)
' 1 - Frac(x)    ' seems to give problems for very small x
IIf(1 - Frac(x) >= 1, 1, 1 - Frac(x))
#EndMacro

Sub imagevars.Plot(x As long, y As long,baseclr As Ulong, c As float)
    if x < 0 orelse x>wm orelse y<0 orelse y>hm then exit sub
    x += y * pitchBy
    y = c * 257 - .5 '' 0..256
    Alpha256(p32[x], p32[x], baseclr, y)
End Sub

sub imagevars.wuLine(x0 As float,y0 As float,x1 As float,y1 As float, clr As Ulong = -1)
    Dim As Integer steep = Abs(y1 - y0) > abs(x1 - x0)
    Dim As float dx,dy,gradient,xend,yend,xgap,xpxl1,ypxl1,xpxl2,ypxl2,intery
   
    if steep then
        Swap x0, y0
        Swap x1, y1
    end If
   
    if x0 > x1 then
        Swap x0, x1
        Swap y0, y1
    end if
   
    dx = x1 - x0
    dy = y1 - y0
    gradient = dy / dx
   
    ' handle first endpoint
    xend = _round(x0)
    yend = y0 + gradient * (xend - x0)
    xgap = _rfpart(x0 + 0.5)
   
    xpxl1 = xend ' this will be used in the main loop
    ypxl1 = _ipart(yend)
   
    if steep then
        plot(ypxl1,   xpxl1, clr, _rfpart(yend) * xgap)
        plot(ypxl1+1, xpxl1, clr,  _fpart(yend) * xgap)
    else
        plot(xpxl1, ypxl1  , clr, _rfpart(yend) * xgap)
        plot(xpxl1, ypxl1+1, clr,  _fpart(yend) * xgap)
    end if
    intery = yend + gradient ' first y-intersection for the main loop
   
    ' handle second endpoint
    xend = _round(x1)
    yend = y1 + gradient * (xend - x1)
    xgap = _fpart(x1 + 0.5)
   
    xpxl2 = xend 'this will be used in the main loop
    ypxl2 = _ipart(yend)
   
    if steep then
        plot(ypxl2  , xpxl2, clr, _rfpart(yend) * xgap)
        plot(ypxl2+1, xpxl2, clr,  _fpart(yend) * xgap)
    else
        plot(xpxl2, ypxl2, clr,  _rfpart(yend) * xgap)
        plot(xpxl2, ypxl2+1, clr, _fpart(yend) * xgap)
    end if
   
    for x As Integer = xpxl1 + 1 to xpxl2 - 1
      if steep then
          plot(_ipart(intery)  , x, clr, _rfpart(intery))
          plot(_ipart(intery)+1, x, clr,  _fpart(intery))
      else
          plot(x, _ipart(intery), clr,  _rfpart(intery))
          plot(x, _ipart(intery)+1, clr, _fpart(intery))
      end if
      intery = intery + gradient
    Next
   
End sub
'
' ------------- visualization


/' -- randomization / testing
 
 Permuted Congruential Generator - (c) 2014 M.E. O'Neill / pcg-random.org
 FreeBASIC adaptation by dafhi, et al.  2018 Aug 21

'/ 

const as ulongint         multiplier = 6364136223846793005

dim shared as ulongint    state      = &h4d595df4d0f33173      ' Or something seed-dependent
dim shared as ulongint    increment  = 1442695040888963407   ' Or an arbitrary odd constant

dim shared as uinteger    gx
dim shared as ulongint    gcount

function rotr32(x as ulong, r as uinteger) as ulong
   return (x shr r) or (x shl (-r and 31))
end function

function fpcg32() as ulong
   gx = state
   gcount = cuint(gx shr 59)                  '' 59 = 64 - 5
   state = gx * multiplier + increment
   gx xor= gx shr 18                          '' 18 = (64 - 27)/2
   return rotr32(cuint(gx shr 27), gcount)    '' 27 = 32 - 5
end function

function randse as double
  Return fpcg32/4294967296.0
end function

sub pcg32_init(seed as ulongint=0)
   state = seed + increment
  fpcg32
end sub
' ---------- RNG

' --------- SparseNet ------------------------------------
'
type                      weight_type as ubyte

type axon
    as weight_type        w
    as myint              isrc
End Type

const as long             weight_mod = clngint(1) shl ( len(weight_type) * 8 )

type tNeuron
    as myint              u = -1
    as axon               a(any)
    declare sub           new_axon(as myint = 0)
End Type
sub tNeuron.new_axon(_isrc as myint):  u += 1
    redim preserve a(u):  a(u).isrc = _isrc':  a(u).w = 0
End Sub

type layer_partition_info
    as myint              u = -1, l, c
    declare sub           init(as myint = 1)
    declare sub           relative(as single = 1, as myint = 0)
End Type
sub layer_partition_info.init(_c as myint):  c = _c:  u = c - 1
End Sub
sub layer_partition_info.relative(size_mul as single, _c as myint)
    var p = @this:  p -= 1
    c = p->c * size_mul + _c
    l = p->u + 1:  u = l + c - 1
End Sub

#define                   rand_weight   randc(weight_mod)

type                      bias_type as ubyte '' same as input stream unit .. (1 for now)
'const                     bias_mod = 1 shl ( len(bias_type) * 8 )
'const as myint            bias_base = (bias_mod-1)/ 2
'#define                   rand_bias     rnd * bias_mod - bias_base

type nodes_and_biases
    as bias_type ptr        p
    as single               thresh
    as layer_partition_info           info(any)
    as tNeuron              n(any)
    'as bias_type          b(any)
    declare sub             init(as any ptr, as myint, as myint = -1)
    'declare sub             _connect(byref as layer_partition_info ptr, byref as layer_partition_info ptr=0)
    declare sub             _connect(byref as layer_partition_info ptr, byref as layer_partition_info ptr=0, as boolean = false)
    declare property        is_layer_out as boolean
    as myint                i, j, ii, jj
End Type
sub nodes_and_biases.init(_p as any ptr, c as myint, l as myint)
    redim info(0): info(0).c=c: info(0).u=c-1: p=_p
    if l >= 0 then  redim n(l to l + c-1)', b(l to l + c-1)
End Sub
sub nodes_and_biases._connect(byref des as layer_partition_info ptr, byref src as layer_partition_info ptr, sparse as boolean)
    if src = 0 then src = des - 1
    
    if sparse then
      for i as myint = des->l to des->u
        n(i).new_axon randc(src->c, src->l)
        n(i).a(n(i).u).w = rand_weight
      Next
    else
      for i as myint = des->l to des->u
        for j as myint = src->l to src->u
          n(i).new_axon j
          n(i).a(n(i).u).w = rand_weight
        Next
      Next
    endif
    
End Sub

property nodes_and_biases.is_layer_out as boolean
    return lbound(n)=0
End Property


' ---------- visualization sub separate from class
'
sub _layervis(byref imv as imagevars ptr, y0 as float, y1 as float, _
                        src as nodes_and_biases ptr, des as nodes_and_biases ptr, _
                        isrc_lpi as myint, ides_lpi as myint)
    
    var border  = 1
    var draw_x0 = border
    var draw_x1 = imv->wm - border
    var dpix = draw_x1 - draw_x0
    
    y0 = y0 * imv->hm
    y1 = y1 * imv->hm
    
    var lpi_des = @des->info(ides_lpi)
    var lpi_src = @src->info(isrc_lpi)
    
    var xa_delt = dpix / (lpi_des->u + 2)
    var xb_delt = dpix / (lpi_src->u + 2)
    
    var i=0, j=0
    wfor( i, lpi_des->l, i < lpi_des->u, 1 )
      var xi = draw_x0 + (i+1) * xa_delt
      wfor( j, 0, j < des->n(i).u, 1 )
        var pax = @des->n(i).a(j)
        var jj = pax->isrc
        var xj = draw_x0 + (jj+1) * xb_delt
        imv->wuline xi, y0, _
                    xj, y1, rgb(pax->w,pax->w,255)
      wend
    wend
    
End Sub


type tSparseNet
   
    ' bn_out & bn each contain a 1d array of neurons, 1d array of biases.
    ' partitions (layers) are tracked using layer_partition_info() in nodes_and_biases
   
    as nodes_and_biases     bn_out, bn
    declare sub             grab_io(as any ptr, as myint, as any ptr, as myint, as boolean = true)
    declare sub             new_layer(as myint = 0, as float = 0, as myint = -1)
    declare sub             connect_output_layer(as myint = -1)
    declare sub             deep_connect(byref as nodes_and_biases ptr, as myint = 0, as myint = 0)
    declare sub             layer_vis(byref as imagevars ptr, as float=0, as float=1, as myint=0)
    declare sub             visualize(byref as imagevars ptr)
    declare function        wsum(as nodes_and_biases ptr, as myint)as float
    declare property        input_data as string
    declare property        get_er as double
    declare sub             rand
    as myint                focus_neuron
    as float                er, e_high, sqr_er
   private:
    as bias_type            d(any)
    declare sub             randomize_neuron(as nodes_and_biases ptr, byref as layer_partition_info ptr, as myint)
    as myint                sparse', isrc
    as myint                cb_unit, i_temp, m_w, m_isrc
    as float                iw
end type

sub tSparseNet.grab_io(src as any ptr, cb_src as myint, des as any ptr, cb_des as myint, fully_connected_layers as boolean)
    
    cb_unit = 1 '' will maybe work with 4-byte elements in future
    bn.init src, cb_src
    bn_out.init des, cb_des, 0
    sparse = not fully_connected_layers
    
    'experimental
    if fully_connected_layers then iw = .25 / (weight_mod)
End Sub

property tSparseNet.input_data as string
    dim as string ret
    for i as long = 0 to bn.info(0).u
      ret += chr(bn.p[i])
    Next
    return ret
End Property

sub tSparseNet.connect_output_layer(_sparse as myint)
    if _sparse<0 then _sparse = sparse
    bn_out._connect @bn_out.info(0), @bn.info(ubound(bn.info)), _sparse
    
    'experimental
    if _sparse then iw = .75 / (weight_mod)
End Sub

sub tSparseNet.deep_connect(byref des as nodes_and_biases ptr, offsrc as myint, offdes as myint)
    offdes = abs(offdes):   if offsrc > 0 then offsrc = -offdes - 1
    var udes = ubound(des->info) - offdes
    var usrc = ubound(bn.info) - offsrc
    if des->is_layer_out then
      if udes <> 0 then               beep: ? "deep connect:  bn_out index must be 0": sleep 500: exit sub
    else
      if usrc >= udes then            beep: ? "deep connect:  src must be < des": sleep 500: exit sub
    EndIf
    if usrc < 0 then                  beep: ? "deep connect:  src offset too low": sleep 1500: exit sub
    
    sparse = true
    des->_connect @des->info(udes), @bn.info(usrc), sparse
     
    'experimental
    iw = .75 / (weight_mod)
End Sub

sub tSparseNet.new_layer(c as myint, siz_mul As float, _sparse as myint)
    var u = ubound(bn.info)+1:  redim preserve bn.info(u)
    bn.info(u).relative siz_mul, c
    var ll = bn.info(0).c, uu = bn.info(u).u
    redim preserve bn.n(ll to uu)', bn.b(ll to uu)
    if _sparse < 0 then _sparse = sparse
    bn._connect @bn.info(u),, _sparse
End Sub

sub tSparseNet.layer_vis(byref imv as imagevars ptr, y0 as float, y1 as float, off as myint)

    var u = ubound(bn.info)
    var output_layer = off=0 orelse u=0
    
    if output_layer then
      
      _layervis imv, y0, y1, @bn, @bn_out, u, 0
    
    else
      
      /'
        Let's say offset is 1.  Caller wants to drop beneath the output
        layer.  If "off" stays at 1 and we subtract it from
        ubound(hidden layers), we will miss the top hidden layer.
        
        So decrement the offset.
      '/
      
      off = abs(off) - 1
      
      u -= off      
      if u > 0 then _layervis imv, y0, y1, @bn, @bn, u-1, u
    EndIf
End Sub

sub tSparseNet.visualize(byref imv as imagevars ptr)
    var u = ubound(bn.info)
    var gfx_step = 1 / (u + 1.1)
    layer_vis imv, 0, gfx_step
    for i as long = 1 to u
      layer_vis imv, i*gfx_step, (i+1)*gfx_step, i
    next
End Sub

sub tSparseNet.randomize_neuron(p as nodes_and_biases ptr, byref layer_src as layer_partition_info ptr, i as myint)
    var j = randc(p->n(i).u+1)
    'for j as myint = 0 to p->n(i).u
      if rnd < .5 then p->n(i).a(j).w = rand_weight
      dim byref as myint isrc = p->n(i).a(j).isrc
      if sparse andalso rnd < .5 then isrc = randc(layer_src->c, layer_src->l)
      if isrc > bn.info(0).u then
        randomize_neuron @bn, layer_src - 1, isrc
      EndIf
    'Next
End Sub

sub tSparseNet.rand
    if rnd < er^1.1 then
      for i as myint = 0 to bn_out.info(0).u
        randomize_neuron @bn_out, @bn.info(ubound(bn.info)), i
      Next
    else
      randomize_neuron @bn_out, @bn.info(ubound(bn.info)), focus_neuron
    endif
End Sub

function tSparseNet.wsum(p as nodes_and_biases ptr, i as myint) as float
    dim as float er_axon_sum
    for i_axon as myint = 0 to p->n(i).u
      m_w = p->n(i).a(i_axon).w
      if m_w > 0 then
        m_isrc = p->n(i).a(i_axon).isrc
        if m_isrc < bn.info(0).c then
          er_axon_sum += p->n(i).a(i_axon).w * iw * bn.p[m_isrc]
        else
          er_axon_sum += p->n(i).a(i_axon).w * iw * wsum(@bn, m_isrc)
        EndIf
      endif
    Next:  return (er_axon_sum)
End Function

property tSparseNet.get_er as double
    e_high = 0:  er = 0
    for i as myint = 0 to bn_out.info(0).u
      if bn_out.n(i).u >= 0 then
        var e = ( bn_out.p[i] - wsum( @bn_out, i ) ) / 255
        e *= e
        if e > e_high then e_high = e: focus_neuron = i
        er += e
      endif
    Next:  er /= (bn_out.info(0).c)
    sqr_er = er
    return er
End Property

' -------------------

sub show(byref des as imagevars ptr, byref im as imagevars ptr, byref net as tSparseNet ptr, er_best as float, iter as myint)
    screenlock
      cls
       ? "error "; er_best; tab(34); "iter"; tab(39); iter
      locate 5,1
      dim as string res, original
      for i as myint = 0 to net->bn_out.info(0).u
        res += chr(net->wsum( @net->bn_out, i ))
        original += chr(net->bn_out.p[i])
      Next
      im->cls
      net->visualize im
      'net->layer_vis im, 0, 1, 1
      put (des->w - im->w, des->h - im->h), im->im, pset
      ? "input data", net->input_data
      ?
      ? "output data",original
      ? "network",res
    screenunlock
End Sub


sub Main
 
    #undef rnd
    #define rnd randse

    pcg32_init
 
    '' network to match data_out at end of training
    dim as string data_in = "00011011"
    dim as string data_out = "0000000100100011010001010110011110001001101010111100110111101111"
    data_out = "0000 0001 0010 0011 0100 0101 0110 0111 1000 1001 1010 1011 1100 1101 1110 1111"
    'data_out = "123456789"
    'data_out = "0110"
   
    dim as tSparseNet net
   
    net.grab_io @data_in[0], len(data_in), @data_out[0], len(data_out)
   
    ' --------------------------------------------
   
    if 0 then
      net.new_layer 3
      net.connect_output_layer
    else
      net.new_layer 8
      'net.new_layer 12
      'net.new_layer 8
      var c = net.bn.info( ubound(net.bn.info) ).c
      for i as myint = 1 to sqr(c)
        net.deep_connect @net.bn_out, 0, 0
      next
    endif
   
       
    ' -------------------------

    dim as tSparseNet net_best = net
    var er_best = net.get_er
   
    screenres 800, 600, 32
    dim as imagevars  buf:  buf.get_info
    
    dim as imagevars im = imagecreate(buf.w,350)
    var u = 15999
   
    for i as myint = 0 to u

      ' --- evolution ---
      net.rand
      var er = net.get_er
      if er < er_best then
        net_best = net
        er_best = er
      elseif er > er_best then
        net = net_best
      EndIf
      ' ---
     
      if i mod 50=0 then
        show @buf, @im, @net, er_best, i
        sleep 10
      endif
     
      if inkey<>"" then exit for
    next
   
    show @buf, @im, @net, er_best, u
   
    ?
    ?
    ? "done."
    sleep

End Sub

Main
Last edited by dafhi on Sep 05, 2018 1:29, edited 16 times in total.
BasicCoder2
Posts: 3906
Joined: Jan 01, 2009 7:03
Location: Australia

Re: RandNet

Post by BasicCoder2 »

It is unclear to me what problem is being solved here.
You prove an ANN's viability by giving it a problem to solve.

The number of possible wiring combinations quickly becomes enormous as the number of components increases, so the problem cannot be solved in real time by trying different randomly wired units. The whole point of an ANN is that it can learn, via techniques such as back propagation, to move its wiring toward a better and better solution. Only the first attempt is random, because you have to start somewhere.
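
For contrast, here is a minimal sketch of the delta-rule update that back propagation builds on, for a single linear neuron (a hypothetical toy, not anything from the posted code):

Code: Select all

'' minimal delta-rule sketch (hypothetical toy): nudge each weight
'' along the error gradient instead of randomizing it
dim as single w0 = 0.1, w1 = -0.2     '' weights
dim as single lr = 0.1                '' learning rate

for epoch as integer = 1 to 100
    dim as single x0 = 1, x1 = 1      '' one training pair: inputs (1,1) -> target 1
    dim as single target = 1
    dim as single out = w0*x0 + w1*x1 '' plain weighted sum, no squashing
    dim as single er = target - out
    w0 += lr * er * x0                '' gradient step per weight
    w1 += lr * er * x1
next
print w0, w1                          '' w0 + w1 converges toward 1
sleep

Back propagation generalizes this per-weight step through multiple layers via the chain rule, which is what makes it more than a random search.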
dafhi
Posts: 1641
Joined: Jun 04, 2005 9:51

Re: RandNet

Post by dafhi »

afaik, backprop is only used to train a model (net)

here's a different dataset

Code: Select all

    dim as string a = "00011011"
    dim as string b = "0110"
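
reading a as four 2-bit pairs and b as one output bit per pair, b looks like the XOR of each pair: 00->0, 01->1, 10->1, 11->0. a quick standalone check of that reading (a hypothetical snippet, not part of the net):

Code: Select all

'' hypothetical check: is b the XOR of each 2-bit pair in a?
dim as string a = "00011011", b = "0110"
for i as integer = 0 to 3
    dim as integer x = a[i*2]   - asc("0")   '' first bit of pair i
    dim as integer y = a[i*2+1] - asc("0")   '' second bit of pair i
    print x & y; " -> "; (x xor y); "  expected "; chr(b[i])
next
sleep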
BasicCoder2
Posts: 3906
Joined: Jan 01, 2009 7:03
Location: Australia

Re: RandNet

Post by BasicCoder2 »

It would help if you could explain in words what the program is actually doing.
I notice the word "evolutionary" in the top comments.
You can evolve a net, which is a kind of learning, by selecting the best of a set of random networks, mutating some of their connections, and then repeating that process. Is that what you are doing?
What exactly are string a and string b?
You run the program, and it does what before printing string b?
I am responding because learning machines interest me, but I am unsure what exactly this program does.
dafhi
Posts: 1641
Joined: Jun 04, 2005 9:51

Re: RandNet

Post by dafhi »

this net is evolutionary in that it randomizes weights and biases, and it will even rewire.

i imagine the basic neuron having inputs (axons) coming from the left.
an axon stores a source neuron index (also to the left) and a weight.
the neuron receiving the signal adds a bias.
a neuron can have several axons (again, coming from the left).

the output layer, bn_out (biases, nodes out), is an array of neurons overlapping the output data, which, when compared, gives the error. you can tap a neuron with wsum (see the last loop in Main).
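
a stripped-down sketch of that picture (my simplification, not the real udt: fixed axon list, no bias, no rewiring, weight scaled to 0..1):

Code: Select all

'' stripped-down neuron tap (simplified sketch, not the real udt)
type sketch_axon
    as ubyte   w      '' weight, 0..255
    as integer isrc   '' index of the source neuron to the left
end type

dim as single src(0 to 3)        '' signals from the layer to the left
src(1) = 1: src(2) = 1

dim as sketch_axon a(0 to 2)     '' three axons into one neuron
a(0).isrc = 1: a(0).w = 200
a(1).isrc = 2: a(1).w = 100
a(2).isrc = 3: a(2).w = 255

dim as single sum = 0
for i as integer = 0 to 2        '' the tap: a weighted sum over the axons
    sum += (a(i).w / 255) * src(a(i).isrc)
next
print sum                        '' roughly what wsum gives for one neuron
sleep

the real wsum also recurses when an axon's source index lands past the input layer, so a tap on an output neuron pulls signal through the hidden layers.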
sean_vn
Posts: 283
Joined: Aug 06, 2012 8:26

Re: RandNet

Post by sean_vn »

I'm not using FB at the moment because of the graphic problem I have on linux amd64. I might try using FB with EGGX graphics library: https://www.ir.isas.jaxa.jp/~cyamauch/eggx_procall/ but that would mean writing an extensive .bi file. It seems easier to just use it from C.
Anyway I had this to say about quantization and evolution:
https://groups.google.com/forum/#!topic ... aKEE0gGGoA
dafhi
Posts: 1641
Joined: Jun 04, 2005 9:51

Re: RandNet

Post by dafhi »

check out my update.

in my network, a re-wire might emulate 'squashing.' i don't have much experience with the numbers, nor many of the patterns associated with just the simple process of mutation.

as i was replying to BasicCoder, i thought: what exactly is mutation? how is it triggered?

i was surprised to find that a small randomization chance converges faster than my original thought of 40%
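
a standalone toy showing what i mean (hypothetical, not the net's code): hill-climb 16 byte weights toward a random target with two per-weight mutation chances:

Code: Select all

'' hypothetical toy, separate from the net: hill-climb 16 byte weights
sub climb(chance as single)
    dim as ubyte w(15), best(15), targ(15)
    for i as integer = 0 to 15: targ(i) = int(rnd * 256): next
    dim as integer er_best = &h7fffffff, iters = 0
    do
        for i as integer = 0 to 15        '' mutate each weight with probability 'chance'
            if rnd < chance then w(i) = int(rnd * 256)
        next
        dim as integer er = 0
        for i as integer = 0 to 15: er += abs(w(i) - targ(i)): next
        if er < er_best then              '' keep an improvement ..
            er_best = er
            for i as integer = 0 to 15: best(i) = w(i): next
        else                              '' .. otherwise restore the previous state
            for i as integer = 0 to 15: w(i) = best(i): next
        end if
        iters += 1
    loop until er_best = 0 or iters = 100000
    print "chance"; chance; "  iters"; iters; "  error"; er_best
end sub

climb 0.05   '' small chance: few simultaneous changes per step
climb 0.40   '' large chance: a good mutation usually arrives bundled with bad ones
sleep

with the larger chance, the whole step gets rejected whenever the bad mutations outweigh the good one, which is my guess at why the small chance converges faster.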
BasicCoder2
Posts: 3906
Joined: Jan 01, 2009 7:03
Location: Australia

Re: RandNet

Post by BasicCoder2 »

sean_vn wrote:Anyway I had this to say about quantization and evolution:
My understanding of quantization is one of fidelity. Copy a digital picture many times and, provided the noise stays below the threshold, each copy will remain the same. Copy an analogue picture many times and it quickly degrades into noise. With a digital image (or data) you can add redundancy to protect against the occasional large noise spike that might change a data value.
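
A quick sketch of the point (hypothetical numbers, nothing from the thread's code): copy one sample a thousand times with noise kept below the threshold; the re-quantized chain never drifts while the analogue one random-walks:

Code: Select all

'' digital vs analogue copying (hypothetical sketch)
dim as single analogue = 100, digital = 100

for i as integer = 1 to 1000
    dim as single noise = (rnd - .5) * 0.8   '' noise magnitude stays below 0.5
    analogue += noise                        '' the analogue copy accumulates error
    digital = int(digital + noise + .5)      '' the digital copy re-quantizes each time
next
print "analogue:"; analogue                  '' has random-walked away from 100
print "digital :"; digital                   '' still exactly 100
sleep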
BasicCoder2
Posts: 3906
Joined: Jan 01, 2009 7:03
Location: Australia

Re: RandNet

Post by BasicCoder2 »

@dafhi,
The reason I would have liked it explained in detail, in words, is that the code is too long and complicated for me to follow or decipher in a reasonable time. From what I can make out from the last example, you claim to have evolved a net that, given 00011011 as input, will output the data_out string, but how it does that from random weight changes is unclear to me. I just can't see what is going on ...

I just feel there is something not quite right, as random changes in a complex system should not converge; they should just jump all over the place.
dafhi
Posts: 1641
Joined: Jun 04, 2005 9:51

Re: RandNet

Post by dafhi »

the net udt is a set of lego bricks

convergence happens in main()

Code: Select all

      if er < er_best then
        net_best = net
        er_best = er
      elseif er > er_best then
        net = net_best
      EndIf
sean_vn
Posts: 283
Joined: Aug 06, 2012 8:26

Re: RandNet

Post by sean_vn »

Well, you can quantize 10.68372892 to 10.6 or 10 or 11, whatever way you want to do it. I was just saying there are some reasons to avoid anything like that, if possible, when trying to solve problems by evolution.
..change of topic..
I was looking at Fortran as an alternative to FB. However, I read that Basic was actually created as an improved version of Fortran.
Fortran has continued to be used at the highest tiers of scientific computing while Basic is rather looked down on. That's a bit unfortunate.
dafhi
Posts: 1641
Joined: Jun 04, 2005 9:51

Re: RandNet

Post by dafhi »

BasicCoder2

Code: Select all

var x_target = 45f
var y_target = 25f

var x_best = 0f, y = 0f
var y_best = 0f, x = 0f

var dx = x_target - x_best
var dy = y_target - y_best

var dist_best = sqr(dx*dx+dy*dy)

var mutation_magnitude = dist_best * 2.1

for i as long = 1 to 99
    
    x += (rnd-.5) * mutation_magnitude
    y += (rnd-.5) * mutation_magnitude
    
    dx = x_target - x
    dy = y_target - y
    
    var dist = sqr(dx*dx+dy*dy) 'error function
  
    ' convergence happens here
    if dist < dist_best then
      x_best = x                'evolving baseline
      y_best = y
      dist_best = dist
      ? "error: "; dist_best
      mutation_magnitude = dist_best * 2.1
    else
      x = x_best                'if result was worse, restore to previous state
      y = y_best
    endif
    
Next
? x_best, y_best

sleep
BasicCoder2
Posts: 3906
Joined: Jan 01, 2009 7:03
Location: Australia

Re: RandNet

Post by BasicCoder2 »

OK, I just noticed "Permuted Congruential Generator" in the top comments.
I was confused because I thought this was a neural network of some kind that could learn input/output patterns.
By random network I assume you actually mean a network that produces random numbers.
Sorry I messed up the thread. I would go back and erase my posts if possible.
cbruce
Posts: 163
Joined: Sep 12, 2007 19:13
Location: Dallas, Texas

Re: SparseNet

Post by cbruce »

BasicCoder2, I'm pretty sure it is an attempt at a sparse neural network.

dafhi implemented the PCG PRNG in FB simply to have control over the kind of random numbers he wanted to use in his routines. (He also implemented Xiaolin Wu's line algorithm, etc.)

It appears that most of dafhi's example code is used for creating a visual display of the program's results.
dafhi
Posts: 1641
Joined: Jun 04, 2005 9:51

Re: SparseNet

Post by dafhi »

thanks for your interest. I have a new version that's almost learning.

busy on other projects :-)
Last edited by dafhi on Sep 05, 2018 1:55, edited 1 time in total.