@@ -431,10 +431,10 @@ class rpnnBouncer {
     { return seed_; }
    void seed(size_t x)
     { rnd_.seed(x); }
-   virtual void bounce(void)
-    { b_(this); }
    virtual void reset(void)                        // rpnnBouncer has no reset
     { }
+   virtual void bounce(void)
+    { b_(this); }
    std::mt19937_64 & rnd(void)
     { return rnd_; }
    double base(void) const
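Note: with bounce() and reset() both virtual, user code can plug in its own bouncer. A minimal sketch of a derived bouncer, assuming the library header is named rpnn.hpp (adjust to the actual include path):

    #include <random>
    #include "rpnn.hpp"                             // assumed header name

    // re-seeds the RNG on every reset; bounce() is inherited and keeps
    // delegating to the installed weight updater
    struct reseedingBouncer: rpnnBouncer {
        void reset(void) override                   // rpnnBouncer::reset() is a no-op
         { seed(std::random_device{}()); }
    };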
@@ -492,6 +492,7 @@ class Rpnn {
        neuron_idx_out_of_range, \
        illegal_logistic_at_output, \
        loading_in_non_receptor, \
+       norm_engaged_after_inputs_loaded, \
        insufficient_inputs, \
        min_two_perceptrons_requied, \
        perceptron_size_illegal, \
@@ -594,12 +595,19 @@ lNeuron::const_iterator output_neurons_itr(void) const
     { return error_trail_.capacity(); }
    Rpnn & lm_detection(size_t x)
     { error_trail_.capacity(x); return *this; }
+   Rpnn & reset_lm(void) {
+    error_trail_.clear();                          // an empty error_trail_ drives re-init,
+    error_trail_.push_back(std::numeric_limits<double>::max()); // a non-empty one skips it
+    return *this;
+   }


    size_t epoch(void) const
     { return epoch_; }
    Rpnn & epoch(size_t x)
     { epoch_ = x; return *this; }
+   size_t epochs(void) const
+    { return epochs_; }
    double min_step(void) const
     { return MIN_STEP_; }
    Rpnn & min_step(double x)
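The newly public reset_lm() clears the local-minimum history while leaving error_trail_ non-empty, so a subsequent converge() resumes from the current weights instead of re-initializing them. A hedged usage sketch (topology and pattern setup omitted):

    Rpnn nn;
    // ... topology built, patterns loaded ...
    nn.converge(1000);                              // first training run
    if(nn.global_error() > nn.target_error()) {
     nn.reset_lm();                                 // drop stale LM history, keep weights
     nn.converge(1000);                             // resume training; epochs() now reports 1000
    }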
@@ -648,8 +656,15 @@ lNeuron::const_iterator output_neurons_itr(void) const
    bool normalizing(void) const {                  // is input normalization engaged?
     return not nis_.empty();
    }
-   Rpnn & normalize(double min = -1., double max = 1.)
-    { nis_.resize(1, Norm(min, max - min)); return *this; }
+   Rpnn & normalize(double min = -1., double max = 1.) {
+    // engages input normalization; must be called before inputs are loaded
+    if(receptors_itr()->input_pattern() != nullptr)
+     throw EXP(Rpnn::norm_engaged_after_inputs_loaded);
+    if(min == max) nis_.clear();                   // min == max disables normalization
+    else
+     nis_.resize(1, Norm(min, max - min));
+    return *this;
+   }
    Rpnn & activate(size_t p) {
     for(auto ei = effectors_itr(); ei != neurons().end(); ei++)
      ei->activate(p);
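normalize() must now be engaged before input patterns are loaded, and passing min == max disables normalization. A sketch of both paths; the pattern-loading call is hypothetical and named here only for illustration:

    #include <iostream>
    #include "rpnn.hpp"                             // assumed header name

    Rpnn nn;
    nn.normalize(-1., 1.);                          // ok: engaged before inputs are loaded
    // load_patterns(nn, ...);                      // hypothetical loader call
    try { nn.normalize(0., 1.); }                   // too late once inputs are loaded
    catch(Rpnn::stdException &e)
     { std::cerr << e.what() << std::endl; }        // norm_engaged_after_inputs_loaded
    // nn.normalize(0., 0.);                        // min == max: disables normalization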
@@ -668,7 +683,6 @@ lNeuron::const_iterator output_neurons_itr(void) const
      else ri->load(*ii);
     return activate();
    }
-   void init_neurons_(void);
    Rpnn & cost_function(c_func *cf)
     { cf_ = cf; return *this; }
    c_func * cost_function(void) const
@@ -702,7 +716,8 @@ lNeuron::const_iterator output_neurons_itr(void) const
    SERDES(Rpnn, input_sets_, nis_, target_sets_, nts_, neurons_,
           &Rpnn::serdes_itr_, output_errors_, target_error_,
           MIN_STEP_, MAX_STEP_, DW_FACTOR_, wb_, wbp_,
-          Rpnn::cf_Sse, Rpnn::cf_Xntropy, cf_, epoch_, terminate_, error_trail_, gpm_)
+          Rpnn::cf_Sse, Rpnn::cf_Xntropy, cf_,
+          epoch_, epochs_, terminate_, error_trail_, gpm_)

    OUTABLE(Rpnn, addr(), min_step(), max_step(), dw_factor(), target_error_,
            cost_func(), wbp_, epoch_, terminate_,
@@ -749,6 +764,7 @@ const std::vector<Norm>&target_normalization(void) const
     gpm_{ XMACRO_FOR_EACH(GENPARAMS) };            // general parameters map
 #undef XMACRO

+   void init_neurons_(void);
    void compute_error_(size_t p);
    void educate_(size_t p);
    bool is_lm_detected_(double err);
@@ -772,6 +788,7 @@ const std::vector<Norm>&target_normalization(void) const
    rpnnBouncer * wbp_{&wb_};
    fifoDeque<double> error_trail_;                 // for detecting LM traps
    size_t epoch_{0};
+   size_t epochs_{0};                              // initialized to avoid an indeterminate read
    bool stop_on_nan_{true};
    bool terminate_{false};

@@ -782,11 +799,6 @@ const std::vector<Norm>&target_normalization(void) const

    private:

-   Rpnn & reset_lm_(void) {
-    error_trail_.clear();                          // empty error_trail_ drives init,
-    error_trail_.push_back(std::numeric_limits<double>::max()); // non-empty
-    return *this;
-   }
    double normalize_(double x, rpnnNeuron *n) {    // normalize receptor in its pattern
     auto ni = ++neurons().begin();                 // begin from 1st receptor
     for(auto nsi = nis_.begin(); nsi != nis_.end(); ++nsi, ++ni)
@@ -996,8 +1008,9 @@ void rpnnNeuron::grow_synapses(size_t i) // extend synaps

 void rpnnNeuron::prune_synapses(size_t n) {
  // n - index of the linked neuron in the NN topology (not a synapse index!)
+ auto & neuron = nn().neuron(n);                   // hoist the lookup out of the loop
  for(auto si = synapses().begin(); si != synapses().end(); ++si)
-  if(&si->linked_neuron() == &nn().neuron(n))
+  if(&si->linked_neuron() == &neuron)
   { synapses().erase(si); break; }
 }

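The hoisted lookup also underlines the call contract: the argument to prune_synapses() is the linked neuron's index in the topology, not a position in the synapse list. A one-line sketch (indices illustrative):

    nn.neuron(5).prune_synapses(2);                 // detach neuron #2 from neuron #5's inputs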
@@ -1292,18 +1305,21 @@ void Rpnn::topology_check(void) {

 void Rpnn::converge(size_t epochs) {
  // converge either to a solution (error < target) or to the end of epochs
+ epochs_ = epochs;                                 // may be used by user bouncers
+
  if(error_trail_.empty()) {                        // hasn't been trained before
-  DBG(2) DOUT() << " dump before convergence: " << *this << std::endl;
   topology_check();
   bouncer().reset();
   bounce_weights();
   init_neurons_();
+  DBG(2) DOUT() << " dump before convergence: " << *this << std::endl;
  }

- if(not terminate_) epoch_ = 0;
  size_t patterns = (++neurons().begin())->input_pattern()->size();
+ if(not terminate_) epoch_ = 0;                    // preserve epoch_ if terminated externally

- while(not terminate_) {                           // terminate_ is an external signal
+ while(epoch_ < epochs) {
+  if(terminate_) break;                            // terminate_ is an external signal
   reset_errors();

   for(size_t p = 0; p < patterns; p++) {           // cycle through all patterns
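converge() is now bounded by the epoch budget itself rather than by the BLM-mode check below, and terminate_ is polled at the top of every epoch, which makes a long run interruptible from another thread. A sketch, assuming an Rpnn object nn that is fully set up:

    #include <thread>

    std::thread trainer([&nn]{ nn.converge(100000); });
    // ... later, from the controlling thread:
    nn.terminate();                                 // converge() exits at the next epoch boundary
    trainer.join();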
@@ -1317,9 +1333,8 @@ void Rpnn::converge(size_t epochs) {
   if(stop_on_nan() and isnan(global_err))
    throw EXP(Rpnn::stopped_on_nan_error);

-  if(not wbp_->finish_upon_lmd())                  // check for errors and epochs
-   if(global_err < target_error() or epoch_ >= epochs) // only if not running in BLM mode
-    break;
+  if(global_err < target_error())
+   break;

   if(lm_detection() and is_lm_detected_(global_err)) { // found a local minimum?
    if(wbp_->finish_upon_lmd()) break;              // in BLM mode it's the end of the run
@@ -1414,7 +1429,7 @@ bool Rpnn::is_lm_detected_(double err) {
14141429 << " , LMD_PTRN_: " << LMD_PTRN_
14151430 << " , error: " << global_error ()
14161431 << " (target error: " << target_error () << " )" << std::endl;
1417- reset_lm_ ();
1432+ reset_lm ();
14181433 return true ; // indicate LM found
14191434 }
14201435 ++++hare;
@@ -1521,18 +1536,16 @@ void blmFinder::bounce(void) {
  std::vector<Rpnn> nnv(tm_.size(), nn());
  std::vector<rpnnBouncer> lmv(tm_.size());         // a bouncer for each nn in nnv

- size_t synapse_cnt = nnv.front().synapse_count();
  auto lmv_it = lmv.begin();
  for(auto &n: nnv) {
   lmv_it->finish_upon_lmd(true);                   // engage BLM mode
   n.bouncer(*lmv_it++);                            // set up its own bouncer for each nn
   n.bouncer().weight_updater(nn().bouncer().weight_updater()); // restore the bouncer function
-  n.lm_detection(nn().lm_detection() == 0?         // the original rpnn might not have it
-                 synapse_cnt * 3: nn().lm_detection()); // so set it up if it doesn't
+  n.lm_detection(nn().lm_detection());
   n.DBG().severity(NDBG);
  }
  DBG(1)
-  DOUT() << " lm_detection size for clones: " << nnv.front().lm_detection() << std::endl;
+  DOUT() << " lm_detection size for threads: " << nnv.front().lm_detection() << std::endl;

  find_blm(nnv);
  nn().terminate();                                 // don't converge the original nn
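Because the synapse-count fallback is removed, the clones now inherit lm_detection() verbatim from the original nn, so it should be set there before engaging blmFinder. A sketch reproducing what the old fallback computed (the blmFinder constructor signature is an assumption here):

    Rpnn nn;
    // ... topology built ...
    nn.lm_detection(nn.synapse_count() * 3);        // what the removed fallback used to set
    blmFinder blm(4);                               // 4 search threads (assumed ctor)
    nn.bouncer(blm);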
@@ -1542,40 +1555,46 @@ void blmFinder::bounce(void) {

 void blmFinder::find_blm(nnv_type &nnv) {
  // run multiple threads searching for the deepest LM
- auto glambda = [&](Rpnn &n, auto &&... arg) {     // helper lambda to start thread
+ auto glambda = [&](Rpnn &n, auto &&... arg) {     // helper lambda to start a thread
   try { return n.converge(std::forward<decltype(arg)>(arg)...); }
   catch(Rpnn::stdException & e) {
-   DBG(0) DOUT() << " exception by one of the threads, invalidating its errors " << std::endl;
+   DBG(0) DOUT() << " exception by one of the threads, restarting the thread " << std::endl;
    for(auto &oe: n.output_errors())
     oe = std::numeric_limits<double>::max();
+   n.reset_lm();
   }
  };

- for(auto &n: nnv)
-  tm_.start_sync(glambda, std::ref(n), SIZE_T(-1));
+ for(auto &n: nnv)                                 // prepare a sync start
+  tm_.start_sync(glambda, std::ref(n), nn().epochs());
  tm_.start_sync();

  for(size_t seat = 0;
      not is_goal_reached_();
-     tm_.run_seat(seat, glambda, std::ref(nnv[seat].bounce_weights()), SIZE_T(-1))) {
+     tm_.run_seat(seat, glambda, std::ref(nnv[seat].bounce_weights()), nn().epochs())) {
+
   seat = tm_.await_seat();                         // wait for any thread to finish
   auto & n = nnv[seat];
-  nn().epoch(nn().epoch() + n.epoch());
-  if(n.global_error() >= best_lm_err_) {           // worse LM found
+  nn().epoch(nn().epoch() + n.epoch());            // accumulate total epochs
+  double global_error = n.global_error();
+
+  if(global_error >= best_lm_err_) {               // worse LM found
    auto check = goal_err_;
    goal_err_ += (best_lm_err_ - goal_err_) / reduce_factor(); // reducing goal error might render
    if(goal_err_ == check) break;                   // it out of double precision
+   DBG(1) DOUT() << " fruitless convergence, adjusting goal to: " << goal_err_ << std::endl;
    continue;
   }

   // better LM found
   preserve_weights_(n);
-  best_lm_err_ = n.global_error();
+  best_lm_err_ = global_error;
   goal_err_ = best_lm_err_ / reduce_factor();
-  DBG(2)
+  DBG(1)
    DOUT() << " better error found: " << best_lm_err_ << " (goal: " << goal_err_ << " )" << std::endl;
  }

+ DBG(0) DOUT() << " end of search reached, terminating all threads..." << std::endl;
  for(auto &n: nnv) n.terminate();                  // to all threads
  tm_.join();
 }
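For reference, the goal adjustment after a fruitless convergence shrinks the gap between goal_err_ and best_lm_err_ by reduce_factor(); a worked example with illustrative numbers:

    double best_lm_err = 0.10, goal_err = 0.01, reduce = 5.;
    goal_err += (best_lm_err - goal_err) / reduce;  // 0.01 + 0.09/5 = 0.028
    // repeated fruitless runs move goal_err toward best_lm_err until the
    // increment vanishes within double precision, which breaks the loop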