Re: Neural networks from scratch in forth

Subject: Re: Neural networks from scratch in forth
From: melahi_ahmed (at) *nospam* yahoo.fr (Ahmed)
Newsgroups: comp.lang.forth
Date: 02 Dec 2024, 21:16:48
Organization: novaBBS
Message-ID: <e8095b4c566e1df3c7179a0534d2442b@www.novabbs.com>
References: 1
User-Agent: Rocksolid Light
Hi again,
Here is the program neural_networks.fs.
-----------The code begins here---------------
include random.fs     \ for Gforth
\ include random3.frt   \ for iForth
\ : random choose ;       \ for iForth and vfxForth
\ ------------
\ Net construction
\ -------------
\ activation functions
: dllinear ( f: x -- x 1) 1e ;
: sigmoid fnegate fexp 1e f+ 1/f ;
: dlsigmoid ( f: x -- y y' )
    sigmoid fdup fdup 1e fswap f- f*
;
: dlatan fdup fatan fswap fdup f* 1e f+ 1/f ;
: dltanh ftanh fdup fdup f* 1e fswap f- ;
defer act_func
' dlatan is act_func
defer act_func_ol
' dllinear is act_func_ol
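\ note: each dl* word returns both the activation value and its
\ derivative ( f: x -- y y' ); the forward pass stores y' (via Op)
\ for later use by backpropagation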
\ neural net
defer net
variable count_layer
: >count_layer count_layer ! ;
: count_layer> count_layer @ ;
: (neuralnet)
   create dup , swap , 0 , 0 do , 0 , loop , 0 ,
;
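\ net table layout, as laid down by (neuralnet):
\   cell 0:         number of hidden layers
\   then per layer: one cell neuron count, one cell layer data address
\   (input layer first, then the hidden layers, then the output layer)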
: layer_address net cell+ count_layer @ 2* cells + cell+ ;
: set_layer_address here layer_address ! ;
: get_layer_address layer_address @ ;
: neurons_current_layer  net cell+ count_layer @    2* cells + @ ;
: neurons_previous_layer net cell+ count_layer @ 1- 2* cells + @ ;
: neurons_next_layer     net cell+ count_layer @ 1+ 2* cells + @ ;
: input_layer
   0 count_layer !
   set_layer_address
   neurons_current_layer 0 do 0e f, loop \ O
;
: add_layer
   1 count_layer +!
   set_layer_address
   neurons_current_layer 0 do 0e f, loop \ O
   neurons_current_layer 0 do 0e f, loop \ Op
   neurons_current_layer 0 do 0e f, loop \ D
   neurons_current_layer 0 do 0e f, loop \ B
   neurons_current_layer 0 do 0e f, loop \ dB
   neurons_current_layer 0 do
     neurons_previous_layer 0 do \ weights
       0e f, \ w
     loop
   loop
   neurons_current_layer 0 do
     neurons_previous_layer 0 do \ dweights
       0e f, \ dw
     loop
   loop
;
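\ per-layer float block, for nl neurons fed by np previous-layer neurons:
\   O[nl] outputs, Op[nl] activation derivatives, D[nl] deltas,
\   B[nl] biases, dB[nl] bias increments,
\   W[nl][np] weights, dW[nl][np] weight increments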
: output_layer add_layer ;
\ inputs/outputs
100 value n_inputs_max
100 value n_outputs_max
create inputs          n_inputs_max  floats allot
create outputs         n_outputs_max floats allot
create desired_outputs n_outputs_max floats allot
variable n_inputs
variable n_outputs
: get_n_inputs  net cell+ @ ;
: get_n_outputs net cell+ net @ 0 do 2 cells + loop 2 cells + @ ;
\ neuralnet
: neuralnet:
    (neuralnet)
;
: net_layers
    input_layer
    net @ 0 do
      add_layer
    loop
    output_layer
    get_n_inputs  n_inputs !
    get_n_outputs n_outputs !
;
: th_layer ( l -- a)
    count_layer ! get_layer_address
;
: O ( nl al i -- nl al a)
    floats
    over +
;
: Op ( nl al i -- nl al a)
    >r
    over r> + floats
    over +
;
: D ( nl al i -- nl al a)
    >r over 2* r> + floats
    over +
;
: B ( nl al i -- nl al a)
    >r
    over 3 * r> + floats
    over +
;
: dB ( nl al i -- nl al a)
    >r
    over 4 * r> + floats
    over +
;
: W ( np nl al d s -- np nl al a)
    rot >r >r >r
    2dup 5 * swap
    r> * + r> + floats r> tuck +
;
: dW ( np nl al d s -- np nl al a)
    rot           ( np nl d s al)
    >r >r >r      ( np nl) ( r: al s d)
    2dup swap dup ( np nl nl np np)
    r> * r> +     ( np nl nl np np*d+s) ( r: al)
    rot rot       ( np nl np*d+s nl np)
    5 + * +        ( np nl np*d+s+nl*[np+5])
    floats
    r> tuck +
;
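\ the accessors keep ( nl al ) resp. ( np nl al ) on the data stack so
\ the loops can reuse them; W and dW take destination neuron d (current
\ layer) and source neuron s (previous layer) and return the float
\ address of w[d][s] / dw[d][s]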
: calc_input_layer   \ input layer
     0 count_layer !
     neurons_current_layer
     get_layer_address
     neurons_current_layer 0 do
       inputs i floats + f@
       i O f!
     loop
     2drop
;
: calc_hidden_layers   \ hidden layers
    net @ 0 do
      i 1+ count_layer !
      neurons_previous_layer
      neurons_current_layer
      get_layer_address
      neurons_current_layer 0 do
        i B f@
        neurons_previous_layer 0 do
          j i W f@
          -1 count_layer +!
          neurons_current_layer
          get_layer_address
          i O f@
          f* f+
          2drop
          1 count_layer +!
        loop
        act_func
        i Op f!
        i O  f!
      loop
      2drop drop
    loop
;
: calc_output_layer   \ output layer
    net @ 1+ count_layer !
    neurons_previous_layer
    neurons_current_layer
    get_layer_address
    neurons_current_layer 0 do
      i B f@
      neurons_previous_layer 0 do
        j i W f@
        -1 count_layer +!
        neurons_current_layer
        get_layer_address
        i O f@
        f* f+
        2drop
        1 count_layer +!
      loop
      act_func_ol
      i Op f!
      i O  f!
    loop
    2drop drop
;
: >outputs \ outputs
    net @ 1+ count_layer !
    neurons_current_layer
    get_layer_address
    neurons_current_layer 0 do
      i O f@
      outputs i floats + f!
    loop
    2drop
;
: forward_pass
    calc_input_layer
    calc_hidden_layers
    calc_output_layer
    >outputs
;
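\ forward pass: for each neuron j,  o_j = f( b_j + sum_i w_ji * o_i )
\ with o_i the previous layer's outputs; f' is kept in Op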
\ ------------------------
\ Learning with Backpropagation Algorithm
\ ------------------------
\ the criterion: cost
fvariable cost
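\ cost for the current sample: 0.5 * sum_k ( output_k - desired_k )^2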
: calc_cost
    0e
    n_outputs @ 0 do
      outputs         i floats + f@
      desired_outputs i floats + f@
      f- fdup f* f+
    loop
    0.5e f*
    cost f!
;
: see_cost forward_pass calc_cost ;
: .cost see_cost cost f@ f. ;
\ init W and B randomly
: (frandom) 10000000000 dup s>f random s>f fswap f/ ; \ uniform in [0,1)
: frandom ( f: a -- x ) (frandom) f* ;                \ uniform in [0,a)
: frand ( f: a b -- x ) fover f- frandom f+ ;         \ uniform in [a,b)
defer init_dweights
defer init_dbiases
defer init_weights
defer init_biases
: init_dweights_1
  net @ 1+ 0 do
    i 1+ count_layer !
    neurons_previous_layer
    neurons_current_layer
    get_layer_address
    neurons_current_layer 0 do
      neurons_previous_layer 0 do
        0e j i dW f!
      loop
    loop
  2drop drop
  loop
;
: init_dbiases_1
  net @ 1+ 0 do
    i 1+ count_layer !
    neurons_current_layer
    get_layer_address
    neurons_current_layer 0 do
      0e i dB f!
    loop
    2drop
  loop
;
: init_weights_1
  net @ 1+ 0 do
    i 1+ count_layer !
    neurons_previous_layer
    neurons_current_layer
    get_layer_address
    neurons_current_layer 0 do
      neurons_previous_layer 0 do
        -1e-1 1e-1 frand j i W f!
      loop
    loop
    2drop drop
  loop
;
: init_biases_1
  net @ 1+ 0 do
    i 1+ count_layer !
    neurons_current_layer
    get_layer_address
    neurons_current_layer 0 do
      -1e-1 1e-1 frand i B f!
    loop
    2drop
  loop
;
: init_weights_2
  net @ 1+ 0 do
    i 1+ count_layer !
    neurons_previous_layer
    neurons_current_layer
    get_layer_address
    neurons_current_layer 0 do
      neurons_previous_layer 0 do
        -1e 1e frand j i W f!
      loop
    loop
    2drop drop
  loop
;
: init_biases_2
  net @ 1+ 0 do
    i 1+ count_layer !
    neurons_current_layer
    get_layer_address
    neurons_current_layer 0 do
      -1e 1e frand i B f!
    loop
    2drop
  loop
;
' init_dweights_1   is init_dweights
' init_dbiases_1    is init_dbiases
' init_weights_1    is init_weights
' init_biases_1     is init_biases
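\ default initialization: weights and biases uniform in [-1e-1, 1e-1];
\ switch to init_weights_2 / init_biases_2 for uniform [-1e, 1e]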
: init_net
    init_dweights
    init_dbiases
    init_weights
    init_biases
;
\ deltas
: calc_deltas_output_layer
    net @ 1+ count_layer !
    neurons_current_layer
    get_layer_address
    neurons_current_layer 0 do
      outputs         i floats + f@
      desired_outputs i floats + f@
      f-
      i D f!
    loop
    2drop
;
: calc_deltas_hidden_layers
    0 net @ 1- do
      i 1+ count_layer !
      neurons_current_layer
      get_layer_address
      neurons_current_layer 0 do
        i Op f@
        count_layer @ 1+ count_layer !
        neurons_previous_layer
        neurons_current_layer
        get_layer_address
        0e
        neurons_current_layer 0 do
          i D f@
          i j W f@
          f* f+
        loop
        2drop drop
        count_layer @ 1- count_layer !
        f*
        i D f!
      loop
      2drop
    -1 +loop
;
: calc_deltas
   calc_deltas_output_layer
   calc_deltas_hidden_layers
;
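\ backpropagated deltas:
\   output layer:  D_k = o_k - d_k  (no f' factor; exact for the default
\                  linear output activation dllinear, whose derivative is 1)
\   hidden layers: D_j = Op_j * sum_k D_k * W_kj  over the next layer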
\ calculate weight and bias increments
fvariable eta
fvariable beta
: >eta  eta f! ;
: >beta beta f! ;
1e-4 >eta
9e-1 >beta
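\ gradient descent with momentum: eta is the learning rate, beta the
\ momentum coefficient:
\   dW <- beta * dW - eta * D * O(previous layer)
\   dB <- beta * dB - eta * D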
\ dweights
: calc_dweights
    net @ 1+ 0 do
      i 1+ count_layer !
      neurons_previous_layer
      neurons_current_layer
      get_layer_address
      neurons_current_layer 0 do
        i D f@
        neurons_previous_layer 0 do
          -1 count_layer +!
          neurons_current_layer
          get_layer_address
          i O f@
          1 count_layer +!
          fover f*
          eta f@ f* fnegate
          2drop
          j i dW f@
          beta f@ f*
          f+
          j i dW f!
        loop
        fdrop
      loop
      2drop drop
    loop
;
\ dbiases
: calc_dbiases
    net @ 1+ 0 do
      i 1+ count_layer !
      neurons_current_layer
      get_layer_address
      neurons_current_layer 0 do
        i D f@
        eta f@ f* fnegate
        i dB f@
        beta f@ f*
        f+
        i dB f!
      loop
      2drop
    loop
;
\ update weights and biases
\ weights
: update_weights
    net @ 1+ 0 do
      i 1+ count_layer !
      neurons_previous_layer
      neurons_current_layer
      get_layer_address
      neurons_current_layer 0 do
        neurons_previous_layer 0 do
          j i dW f@
          j i W f@
          f+
          j i W f!
        loop
      loop
      2drop drop
    loop
;
\ dbiases
: update_biases
    net @ 1+ 0 do
      i 1+ count_layer !
      neurons_current_layer
      get_layer_address
      neurons_current_layer 0 do
        i dB f@
        i B f@
        f+
        i B f!
      loop
      2drop
    loop
;
: one_pass
    forward_pass
    calc_cost
    calc_deltas
    calc_dweights
    update_weights
    calc_dbiases
    update_biases
;
\ data
variable n_samples
variable data
: >n_samples n_samples ! ;
: >data data ! ;
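\ data points to n_samples contiguous records, each record holding
\ n_inputs floats followed by n_outputs floats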
\ one epoch
fvariable sum_cost
fvariable previous_sum_cost
1e9 previous_sum_cost f!
: one_epoch
    0e sum_cost f!
    n_samples @ 0 do
      data @
      i n_inputs @ n_outputs @ + *
      n_inputs @ 0 do
        2dup
        i + floats + f@
        inputs i floats + f!
      loop
      2drop
      data @
      i n_inputs @ n_outputs @ + * n_inputs @ +
      n_outputs @ 0 do
        2dup
        i + floats + f@
        desired_outputs i floats + f!
      loop
      2drop
      one_pass
      cost f@ sum_cost f@ f+ sum_cost f!
    loop
;
\ learn for several epochs
variable n_epochs
fvariable tol  \ tolerance
fvariable rtol \ relative tolerance
variable display_step
variable adapt_eta?
variable init_net?
: >epochs n_epochs ! ;
: >tol tol f! ;
: >rtol rtol f! ;
: >display_step display_step ! ;
: >adapt_eta adapt_eta? ! ;
: >init_net  init_net? ! ;
1000 >epochs
1e-3 >tol
0e >rtol
1 >display_step
false >adapt_eta
true >init_net
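\ learn stops early when sum_cost < tol, or when the epoch-to-epoch
\ change |sum_cost - previous_sum_cost| < rtol; with adapt_eta on,
\ eta and beta are scaled by 0.99 whenever the cost increases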
: learn
    cr s" Learning..." type
    cr s" -----------" type
    cr s" epochs| Cost" type
    cr s" ------+ ----" type
    init_net? @ if
      init_net
    then
    n_epochs @ 0 do
      one_epoch
      i display_step @ mod 0= if
        cr i . 3 spaces sum_cost f@ f. \ 2 spaces previous_sum_cost f@ f.
      then
      sum_cost f@ tol f@ f< if
        unloop exit
      then
      sum_cost f@ previous_sum_cost f@ f- fabs
      rtol f@ f< if
        unloop exit
      then
      adapt_eta? @ if
        sum_cost f@ previous_sum_cost f@ f> if
          eta f@ 0.99e f* >eta
          beta f@ 0.99e f* >beta
          1e9 previous_sum_cost f!
          cr ." -------------updating eta and
beta-----------------------"
        then
      then
      sum_cost f@ previous_sum_cost f!
    loop
;
: test
    cr ." inputs | outputs (desired outputs)"
    cr ." -------+--------------------------"
    n_samples @ 0 do
      cr
      n_inputs @ 0 do
        data @ j n_inputs @ n_outputs @ + * i + floats + f@
        inputs i floats + f!
      loop
      forward_pass
      n_inputs @ 0 do
        inputs  i floats + f@ f.
      loop
      ."  |  "
      n_outputs @ 0 do
        outputs i floats + f@ f.
        ."  ("
        data @ j n_inputs @ n_outputs @ + * n_inputs @ + i + floats + f@ f.
        ." )  "
      loop
    loop
;
\ for making predictions
: to_inputs 0 n_inputs @ 1- do inputs i floats + f! -1 +loop ;
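\ to_inputs pops n_inputs floats from the float stack; the first input
\ is pushed first (deepest on the stack)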
: outputs_ident ;
: outputs_softmax
    n_outputs @ 0 do
      outputs i floats + f@
      1e0 f* fexp
      outputs i floats + f!
    loop
    0e
    n_outputs @ 0 do
      outputs i floats + f@ f+
    loop
    n_outputs @ 0 do
      outputs i floats + f@
      fover f/
      outputs i floats + f!
    loop
    fdrop
;
: outputs_probs ( -- ) \ normalize the outputs to sum to 1
    0e
    n_outputs @ 0 do
      outputs i floats + f@ f+
    loop
    n_outputs @ 0 do
      outputs i floats + f@
      fover f/
      outputs i floats + f!
    loop
    fdrop
;
defer outputs_ips \ i stands for ident, p for probs, and s for softmax
' outputs_probs is outputs_ips
: .outputs
    cr ." out_n°| value"
    cr       ." ------+------"
    n_outputs @ 0 do
      cr i . ."     | " outputs i floats + f@ f.
    loop
;
: net_predict         to_inputs forward_pass                 .outputs ;
: net_predict_probs   to_inputs forward_pass outputs_probs   .outputs ;
: net_predict_softmax to_inputs forward_pass outputs_softmax .outputs ;
: net_predict_ips     to_inputs forward_pass outputs_ips     .outputs ;
\ Prediction: possible forms
\ net_predict
\ net_predict_probs
\ net_predict_softmax
\ net_predict_ips
\ forward_pass .outputs
\ forward_pass outputs_probs   .outputs
\ forward_pass outputs_softmax .outputs
-----------The code finishes here--------------------
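A minimal usage sketch, not part of the posted file: the stack order for
neuralnet: is inferred from (neuralnet) (outputs first, then the hidden
layer sizes, then inputs, then the number of hidden layers), so treat the
whole snippet as an assumption to be checked against the code:

\ a 2-4-1 net: 2 inputs, one hidden layer of 4 neurons, 1 output
1 4 2 1 neuralnet: xor-net
' xor-net is net
net_layers

\ XOR training data: per sample, n_inputs floats then n_outputs floats
create xor-data
0e f, 0e f,  0e f,
0e f, 1e f,  1e f,
1e f, 0e f,  1e f,
1e f, 1e f,  0e f,
4 >n_samples
xor-data >data

1e-2 >eta  9e-1 >beta  10000 >epochs  1e-4 >tol  100 >display_step
learn test
0e 1e net_predict  \ to_inputs takes the inputs from the float stack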
Enjoy,
Ahmed