medfall

A super great game engine

context.hpp (39751B)


      1 /*  Relacy Race Detector
      2  *  Copyright (c) 2008-2013, Dmitry S. Vyukov
      3  *  All rights reserved.
      4  *  This software is provided AS-IS with no warranty, either express or implied.
      5  *  This software is distributed under a license and may not be copied,
      6  *  modified or distributed except as expressly authorized under the
      7  *  terms of the license contained in the file LICENSE in this distribution.
      8  */
      9 
     10 #ifndef RL_CONTEXT_HPP
     11 #define RL_CONTEXT_HPP
     12 #ifdef _MSC_VER
     13 #   pragma once
     14 #endif
     15 
     16 #include "base.hpp"
     17 #include "thread_local_ctx.hpp"
     18 #include "context_base.hpp"
     19 #include "thread.hpp"
     20 #include "history.hpp"
     21 #include "memory.hpp"
     22 #include "test_result.hpp"
     23 #include "slab_allocator.hpp"
     24 #include "test_params.hpp"
     25 #include "random.hpp"
     26 #include "foreach.hpp"
     27 
     28 #include "random_scheduler.hpp"
     29 #include "full_search_scheduler.hpp"
     30 #include "context_bound_scheduler.hpp"
     31 
     32 
     33 
     34 namespace rl
     35 {
     36 
     37 template<thread_id_t thread_count> class generic_mutex_data_impl;
     38 template<thread_id_t thread_count> class condvar_data_impl;
     39 template<thread_id_t thread_count> class sema_data_impl;
     40 template<thread_id_t thread_count> class event_data_impl;
     41 
     42 
     43 struct park_event
     44 {
     45     bool is_timed_;
     46     bool allow_spurious_;
     47 
     48     void output(std::ostream& s) const
     49     {
     50         s << "blocking current thread" << (is_timed_ ? " [timed]" : "");
     51     }
     52 };
     53 
     54 struct unpark_event
     55 {
     56     thread_id_t thread_;
     57 
     58     void output(std::ostream& s) const
     59     {
     60         s << "unblocking thread " << thread_;
     61     }
     62 };
     63 
     64 struct yield_event
     65 {
     66     unsigned count_;
     67 
     68     void output(std::ostream& s) const
     69     {
     70         s << "yield(" << count_ << ")";
     71     }
     72 };
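
These event structs are plain records that the RL_HIST_CTX macro (from history.hpp) stamps into the per-iteration execution history; each one only needs an output() method for the history printer. A usage site from later in this file, for reference:

    RL_HIST_CTX(yield_event) {count} RL_HIST_END();

which aggregate-initializes a yield_event {count_} and logs it, presumably together with the current thread and call-site debug info.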
     73 
     74 
     75 /*
     76 template<typename test_t, typename scheduler_t>
     77 struct context_persistent
     78 {
     79     static thread_id_t const        thread_count = test_t::params::thread_count;
     80     fiber_t                         fibers_ [thread_count];
     81     memory_mgr                      memory_;
     82 
     83     context_persistent()
     84     {
     85         for (thread_id_t i = 0; i != thread_count; ++i)
     86         {
     87             create_fiber(fibers_[i], &context_impl<test_t, scheduler_t>::fiber_proc, (void*)(intptr_t)i);
     88         }
     89     }
     90 
     91     ~context_persistent()
     92     {
     93         for (thread_id_t i = 0; i != thread_count; ++i)
     94         {
     95             delete_fiber(fibers_[i]);
     96         }
     97     }
     98 };
     99 */
    100 
    101 
    102 template<typename test_t, typename scheduler_t>
    103 class context_impl
    104     : thread_local_contxt_impl<context_addr_hash_impl<context, test_t::params::thread_count>, test_t::params::thread_count>
    105 {
    106 private:
    107     typedef thread_local_contxt_impl
    108         <context_addr_hash_impl<context, test_t::params::thread_count>,
    109             test_t::params::thread_count>
    110                 base_t;
    111     typedef typename scheduler_t::shared_context_t shared_context_t;
    112 
    113     using base_t::params_;
    114     using base_t::history_;
    115     using base_t::threadx_;
    116     using base_t::disable_preemption_;
    117     using base_t::disable_alloc_;
    118     using base_t::invariant_executing;
    119 
    120     static thread_id_t const main_thread_id = -1;
    121     static thread_id_t const static_thread_count = test_t::params::static_thread_count;
    122     static thread_id_t const dynamic_thread_count = test_t::params::dynamic_thread_count;
    123     static thread_id_t const thread_count = test_t::params::thread_count;
    124 
    125     iteration_t                     current_iter_;
    126     test_result_e                   test_result_;
    127     string                          test_result_str_;
    128     fiber_t                         main_fiber_;
    129     bool                            special_function_executing;
    130     memory_mgr                      memory_;
    131     iteration_t                     start_iteration_;
    132     size_t                          sched_count_;
    133     scheduler_t                     sched_;
    134     shared_context_t&               sctx_;
    135     random_generator                rand_;
    136     test_t*                         current_test_suite;
    137     bool                            current_test_suite_constructed;
    138     bool                            first_thread_;
    139     timestamp_t                     seq_cst_fence_order_ [thread_count];
    140 
    141     aligned<thread_info<thread_count> > threads_ [thread_count];
    142 
    143     thread_info<thread_count>& threadi()
    144     {
    145         return *static_cast<thread_info<thread_count>*>(threadx_);
    146     }
    147 
    148     slab_allocator<atomic_data_impl<thread_count> >*        atomic_alloc_;
    149     slab_allocator<var_data_impl<thread_count> >*           var_alloc_;
    150     slab_allocator<generic_mutex_data_impl<thread_count> >* mutex_alloc_;
    151     slab_allocator<condvar_data_impl<thread_count> >*       condvar_alloc_;
    152     slab_allocator<sema_data_impl<thread_count> >*          sema_alloc_;
    153     slab_allocator<event_data_impl<thread_count> >*         event_alloc_;
    154 
    155     virtual atomic_data* atomic_ctor(void* ctx)
    156     {
    157         return new (atomic_alloc_->alloc(ctx)) atomic_data_impl<thread_count> ();
    158     }
    159 
    160     virtual void atomic_dtor(atomic_data* data)
    161     {
    162         static_cast<atomic_data_impl<thread_count>*>(data)->~atomic_data_impl<thread_count>();
    163         atomic_alloc_->free(static_cast<atomic_data_impl<thread_count>*>(data));
    164     }
    165 
    166     virtual var_data* var_ctor()
    167     {
    168         return new (var_alloc_->alloc()) var_data_impl<thread_count> ();
    169     }
    170 
    171     virtual void var_dtor(var_data* data)
    172     {
    173         static_cast<var_data_impl<thread_count>*>(data)->~var_data_impl<thread_count>();
    174         var_alloc_->free(static_cast<var_data_impl<thread_count>*>(data));
    175     }
    176
    177     virtual unpark_reason wfmo_park(void** ws,
    178                                     win_waitable_object** wo,
    179                                     size_t count,
    180                                     bool wait_all,
    181                                     bool is_timed,
    182                                     debug_info_param info)
    183     {
    184         return waitset<thread_count>::park_current(*this,
    185             reinterpret_cast<waitset<thread_count>**>(ws),
    186             wo, count, wait_all, is_timed, true, info);
    187     }
    188 
    189 public:
    190     context_impl(test_params& params, shared_context_t& sctx)
    191         : base_t(thread_count, params)
    192         , current_iter_(0)
    193         , start_iteration_(1)
    194         , sched_(params, sctx, dynamic_thread_count)
    195         , sctx_(sctx)
    196     {
    197         this->context::seq_cst_fence_order_ = this->seq_cst_fence_order_;
    198 
    199         current_test_suite = (test_t*)(::malloc)(sizeof(test_t));
    200         current_test_suite_constructed = false;
    201 
    202         test_result_ = test_result_success;
    203         threadx_ = 0;
    204         special_function_executing = false;
    205         invariant_executing = false;
    206 
    207         create_main_fiber(main_fiber_);
    208         set_low_thread_prio();
    209 
    210         if (0 == val(thread_count))
    211         {
    212             throw std::logic_error("no threads created");
    213         }
    214 
    215         atomic_alloc_ = new slab_allocator<atomic_data_impl<thread_count> >();
    216         var_alloc_ = new slab_allocator<var_data_impl<thread_count> >();
    217         mutex_alloc_ = new slab_allocator<generic_mutex_data_impl<thread_count> >();
    218         condvar_alloc_ = new slab_allocator<condvar_data_impl<thread_count> >();
    219         sema_alloc_ = new slab_allocator<sema_data_impl<thread_count> >();
    220         event_alloc_ = new slab_allocator<event_data_impl<thread_count> >();
    221 
    222         for (thread_id_t i = 0; i != thread_count; ++i)
    223         {
    224             new (&threads_[i]) thread_info<thread_count> (i);
    225             threads_[i].ctx_ = this;
    226         }
    227 
    228         for (thread_id_t i = 0; i != thread_count; ++i)
    229         {
    230             //threads_[i].fiber_ = persistent.fibers_[i];
    231             create_fiber(threads_[i].fiber_, &context_impl::fiber_proc, (void*)(intptr_t)i);
    232         }
    233 
    234         disable_alloc_ = 0;
    235     }
    236 
    237     ~context_impl()
    238     {
    239         disable_alloc_ += 1;
    240 
    241         for (thread_id_t i = 0; i != thread_count; ++i)
    242         {
    243             delete_fiber(threads_[i].fiber_);
    244         }
    245 
    246         delete_main_fiber(main_fiber_);
    247 
    248         // the test suite destructor may issue atomic loads, stores, etc.,
    249         // so this is not a good place to call user code
    250         //destroy_current_test_suite();
    251         //::free(current_test_suite);
    252 
    253         delete atomic_alloc_;
    254         delete var_alloc_;
    255         delete mutex_alloc_;
    256         delete condvar_alloc_;
    257         delete sema_alloc_;
    258         delete event_alloc_;
    259     }
    260 
    261     void construct_current_test_suite()
    262     {
    263         RL_VERIFY(false == current_test_suite_constructed);
    264         new (current_test_suite) test_t ();
    265         current_test_suite_constructed = true;
    266     }
    267 
    268     void destroy_current_test_suite()
    269     {
    270         if (current_test_suite_constructed)
    271         {
    272             current_test_suite->~test_t();
    273             current_test_suite_constructed = false;
    274         }
    275     }
    276 
    277     virtual void* alloc(size_t size, bool is_array, debug_info_param info)
    278     {
    279         disable_alloc_ += 1;
    280 #ifndef RL_GC
    281         void* p = memory_.alloc(size);
    282 #else
    283         void* p = memory_.alloc(size, (void(*)(void*))0);
    284 #endif
    285         disable_alloc_ -= 1;
    286         RL_HIST_CTX(memory_alloc_event) {p, size, is_array} RL_HIST_END();
    287         return p;
    288     }
    289 
    290 #ifdef RL_GC
    291     virtual void* alloc(size_t size, bool is_array, void(*dtor)(void*), debug_info_param info)
    292     {
    293         disable_alloc_ += 1;
    294         void* p = memory_.alloc(size, dtor);
    295         disable_alloc_ -= 1;
    296         RL_HIST_CTX(memory_alloc_event) {p, size, is_array} RL_HIST_END();
    297         return p;
    298     }
    299 #endif
    300 
    301     virtual void free(void* p, bool is_array, debug_info_param info)
    302     {
    303         RL_HIST_CTX(memory_free_event) {p, is_array} RL_HIST_END();
    304 #ifndef RL_GC
    305         bool const defer = (0 == sched_.rand(this->is_random_sched() ? 4 : 2, sched_type_mem_realloc));
    306 #else
    307         bool const defer = false;
    308 #endif
    309         disable_alloc_ += 1;
    310         if (false == memory_.free(p, defer))
    311             fail_test("incorrect address passed to free() function", test_result_double_free, info);
    312         disable_alloc_ -= 1;
    313     }
    314 
    315     size_t prev_alloc_size_;
    316     debug_info last_info_;
    317 
    318     virtual void* alloc(size_t size)
    319     {
    320         if (disable_alloc_)
    321             return (::malloc)(size);
    322 
    323         prev_alloc_size_ = size;
    324         disable_alloc_ += 1;
    325 #ifndef RL_GC
    326         void* p = (memory_.alloc)(size);
    327 #else
    328         void* p = (memory_.alloc)(size, 0);
    329 #endif
    330         disable_alloc_ -= 1;
    331         return p;
    332     }
    333 
    334     virtual size_t prev_alloc_size()
    335     {
    336         size_t sz = prev_alloc_size_;
    337         prev_alloc_size_ = 0;
    338         return sz;
    339     }
    340 
    341     virtual void set_debug_info(debug_info_param info)
    342     {
    343         last_info_ = info;
    344     }
    345 
    346     virtual void free(void* p)
    347     {
    348         if (disable_alloc_)
    349         {
    350             (::free)(p);
    351             return;
    352         }
    353         
    354         disable_alloc_ += 1;
    355         debug_info const& info = last_info_;
    356         RL_HIST_CTX(memory_free_event) {p, false} RL_HIST_END();
    357 #ifndef RL_GC
    358         bool const defer = (0 == sched_.rand(this->is_random_sched() ? 4 : 2, sched_type_mem_realloc));
    359 #else
    360         bool const defer = false;
    361 #endif
    362         if (false == memory_.free(p, defer))
    363             fail_test("incorrect address passed to free() function", test_result_double_free, info);
    364         disable_alloc_ -= 1;
    365     }
    366 
    367     virtual unpark_reason park_current_thread(bool is_timed,
    368                                               bool allow_spurious_wakeup,
    369                                               bool do_switch,
    370                                               debug_info_param info)
    371     {
    372         RL_VERIFY(false == special_function_executing);
    373         RL_VERIFY(threadx_->saved_disable_preemption_ == -1);
    374         unsigned dp = disable_preemption_;
    375         disable_preemption_ = 0;
    376         RL_HIST_CTX(park_event) {is_timed, allow_spurious_wakeup} RL_HIST_END();
    377         if (false == sched_.park_current_thread(is_timed, allow_spurious_wakeup))
    378         {
    379             fail_test("deadlock detected", test_result_deadlock, info);
    380         }
    381         schedule(1);
    382         // otherwise it's restored in switch_back()
    383         RL_VERIFY(threadx_->saved_disable_preemption_ == -1);
    384         if (do_switch == false || threadx_->unpark_reason_ != unpark_reason_normal)
    385             disable_preemption_ = dp;
    386         else
    387             threadx_->saved_disable_preemption_ = dp;
    388         unpark_reason reason = threadx_->unpark_reason_;
    389         return reason;
    390     }
    391 
    392     virtual void unpark_thread(thread_id_t th, bool do_switch, debug_info_param info)
    393     {
    394         RL_VERIFY(false == special_function_executing);
    395         RL_HIST_CTX(unpark_event) {th} RL_HIST_END();
    396         sched_.unpark_thread(th, do_switch);
    397         if (do_switch)
    398         {
    399             threads_[th].unpark_reason_ = unpark_reason_normal;
    400             threads_[th].temp_switch_from_ = threadx_->index_;
    401             switch_to_fiber(th);
    402         }
    403     }
    404 
    405     virtual void switch_back(debug_info_param info)
    406     {
    407 //std::cout << "switching back from " << threadx_->index_ << " to " << threadx_->temp_switch_from_ << std::endl;
    408         (void)info;
    409         RL_VERIFY(threadx_->saved_disable_preemption_ != -1);
    410         RL_VERIFY(threadx_->temp_switch_from_ != -1);
    411         thread_id_t const tid = threadx_->temp_switch_from_;
    412         threadx_->temp_switch_from_ = -1;
    413         switch_to_fiber(tid);
    414         RL_VERIFY(threadx_->saved_disable_preemption_ != -1);
    415         disable_preemption_ = threadx_->saved_disable_preemption_;
    416         threadx_->saved_disable_preemption_ = -1;
    417     }
    418 
    419     void ensure(bool cond, char const* desc, test_result_e res, debug_info_param info)
    420     {
    421         if (false == cond)
    422             fail_test(desc, res, info);
    423     }
    424 
    425     virtual void fail_test(char const* desc, test_result_e res, debug_info_param info)
    426     {
    427 
    428         RL_DEBUGBREAK_ON_FAILURE_IMPL;
    429 
    430         RL_VERIFY(test_result_success != res);
    431 
    432         test_result_ = res;
    433         if (test_result_user_assert_failed == res && invariant_executing)
    434             test_result_ = test_result_user_invariant_failed;
    435         if (0 == desc || 0 == desc[0])
    436             test_result_str_ = test_result_str(test_result_);
    437         else
    438             test_result_str_ = string(test_result_str(test_result_)) + " (" + desc + ")";
    439 
    440         RL_HIST_CTX(user_event) {test_result_str_.c_str()} RL_HIST_END();
    441 
    442         switch_to_main_fiber();
    443     }
    444 
    445     virtual void rl_until(char const* desc, debug_info_param info)
    446     {
    447         RL_HIST_CTX(user_event) {desc} RL_HIST_END();
    448         test_result_ = test_result_until_condition_hit;
    449         switch_to_main_fiber();
    450     }
    451 
    452     static void fiber_proc(void* thread_index);
    453 
    454     virtual void fiber_proc_impl(int thread_index)
    455     {
    456         thread_info_base* param = &threads_[thread_index];
    457         debug_info info = $;
    458         for (;;)
    459         {
    460             if (first_thread_)
    461             {
    462                 first_thread_ = false;
    463                 special_function_executing = true;
    464                 RL_HIST_CTX(user_event) {"[CTOR BEGIN]"} RL_HIST_END();
    465                 construct_current_test_suite();
    466                 RL_HIST_CTX(user_event) {"[CTOR END]"} RL_HIST_END();
    467                 RL_HIST_CTX(user_event) {"[BEFORE BEGIN]"} RL_HIST_END();
    468                 current_test_suite->before();
    469                 RL_HIST_CTX(user_event) {"[BEFORE END]"} RL_HIST_END();
    470                 rl_global_fence();
    471                 invariant_executing = true;
    472                 current_test_suite->invariant();
    473                 invariant_executing = false;
    474                 special_function_executing = false;
    475             }
    476 
    477 //std::cout << "thread " << param->index_ << " started" << std::endl;
    478             param->on_start();
    479 
    480             if (param->index_ < static_thread_count)
    481             {
    482                 current_test_suite->thread(param->index_);
    483             }
    484             else
    485             {
    486                 if (param->dynamic_thread_func_)
    487                     param->dynamic_thread_func_(param->dynamic_thread_param_);
    488             }
    489 
    490 //std::cout << "thread " << param->index_ << " finished" << std::endl;
    491             RL_HIST_CTX(user_event) {"[THREAD FINISHED]"} RL_HIST_END();
    492             RL_VERIFY(disable_preemption_ == 0);
    493             RL_VERIFY(threadx_->temp_switch_from_ == -1);
    494             RL_VERIFY(threadx_->saved_disable_preemption_ == -1);
    495 
    496             param->on_finish();
    497 
    498             thread_finish_result res = sched_.thread_finished();
    499 //std::cout << "thread " << param->index_ << " finished res=" << res << std::endl;
    500             if (thread_finish_result_normal == res)
    501             {
    502                 sched();
    503             }
    504             else if (thread_finish_result_last == res)
    505             {
    506                 special_function_executing = true;
    507                 invariant_executing = true;
    508                 current_test_suite->invariant();
    509                 invariant_executing = false;
    510                 rl_global_fence();
    511                 RL_HIST_CTX(user_event) {"[AFTER BEGIN]"} RL_HIST_END();
    512                 current_test_suite->after();
    513                 RL_HIST_CTX(user_event) {"[AFTER END]"} RL_HIST_END();
    514                 RL_HIST_CTX(user_event) {"[DTOR BEGIN]"} RL_HIST_END();
    515                 destroy_current_test_suite();
    516                 RL_HIST_CTX(user_event) {"[DTOR END]"} RL_HIST_END();
    517                 special_function_executing = false;
    518 
    519                 ensure(memory_.iteration_end(), "memory leak detected", test_result_memory_leak, $);
    520                 ensure(atomic_alloc_->iteration_end(), "atomic leak", test_result_resource_leak, $);
    521                 ensure(var_alloc_->iteration_end(), "var leak", test_result_resource_leak, $);
    522                 ensure(mutex_alloc_->iteration_end(), "mutex leak", test_result_resource_leak, $);
    523                 ensure(condvar_alloc_->iteration_end(), "condition variable leak", test_result_resource_leak, $);
    524                 ensure(sema_alloc_->iteration_end(), "semaphore leak", test_result_resource_leak, $);
    525                 ensure(event_alloc_->iteration_end(), "event leak", test_result_resource_leak, $);
    526 
    527                 switch_to_main_fiber();
    528             }
    529             else if (thread_finish_result_deadlock == res)
    530             {
    531                 fail_test("deadlock detected", test_result_deadlock, info);
    532             }
    533             else
    534             {
    535                 RL_VERIFY(false);
    536             }
    537         }
    538     }
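
fiber_proc_impl pins down the order of the user-visible hooks: the suite constructor and before() run first (on the first fiber of each iteration), the thread() bodies are then interleaved by the scheduler, invariant() is re-checked at scheduling points (see schedule() below), and the last thread to finish runs invariant(), after(), the suite destructor, and the leak checks. A minimal suite touching all four hooks, sketched after Relacy's documented usage:

    struct race_test : rl::test_suite<race_test, 2>
    {
        rl::atomic<int> a;
        rl::var<int> x;

        void before()               // single-threaded setup
        {
            a($) = 0;
            x($) = 0;
        }

        void thread(unsigned index) // interleaved; one call per static thread
        {
            if (0 == index)
            {
                x($) = 1;
                a($).store(1, rl::memory_order_release);
            }
            else if (1 == a($).load(rl::memory_order_acquire))
            {
                RL_ASSERT(x($) == 1);
            }
        }

        void invariant() {}         // re-checked between steps; must not modify state
        void after() {}             // single-threaded teardown
    };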
    539 
    540     virtual win_waitable_object* create_thread(void*(*fn)(void*), void* ctx)
    541     {
    542         RL_VERIFY(fn);
    543         thread_id_t id = sched_.create_thread();
    544         threads_[id].dynamic_thread_func_ = fn;
    545         threads_[id].dynamic_thread_param_ = ctx;
    546         threads_[id].sync_object_.on_create();
    547         return &threads_[id].sync_object_;
    548     }
    549 
    550     virtual void yield(unsigned count, debug_info_param info)
    551     {
    552         RL_VERIFY(count);
    553         RL_HIST_CTX(yield_event) {count} RL_HIST_END();
    554         if (sched_count_++ > params_.execution_depth_limit)
    555             fail_test("livelock", test_result_livelock, RL_INFO);
    556         schedule(count);
    557     }
    558 
    559     virtual void sched()
    560     {
    561         if (sched_count_++ > params_.execution_depth_limit)
    562             fail_test("livelock", test_result_livelock, RL_INFO);
    563         if (disable_preemption_)
    564             return;
    565         schedule(0);
    566     }
    567 
    568     void schedule(unsigned yield)
    569     {
    570         RL_VERIFY(threadx_->temp_switch_from_ == -1);
    571         RL_VERIFY(disable_preemption_ == 0);
    572         if (special_function_executing)
    573         {
    574             threadx_->unpark_reason_ = unpark_reason_normal;
    575             return;
    576         }
    577 
    578         special_function_executing = true;
    579         invariant_executing = true;
    580         current_test_suite->invariant();
    581         invariant_executing = false;
    582         special_function_executing = false;
    583 
    584         if (yield)
    585             threadx_->last_yield_ = threadi().own_acq_rel_order_;
    586 
    587         unpark_reason reason = unpark_reason_normal;
    588         thread_id_t const th = sched_.schedule(reason, yield);
    589         threads_[th].unpark_reason_ = reason;
    590 
    591         switch_to_fiber(th);
    592         RL_VERIFY(0 == disable_preemption_);
    593     }
    594 
    595     test_result_e simulate(std::ostream& ss, std::istream& sss, bool second)
    596     {
    597         if (EOF != sss.peek())
    598         {
    599             sss >> start_iteration_;
    600             sched_.set_state(sss);
    601         }
    602 
    603         test_result_e const res = simulate2(second);
    604 
    605         if (test_result_success != res && false == params_.collect_history)
    606         {
    607             ss << params_.stop_iteration << " ";
    608             sched_.get_state(ss);
    609         }
    610 
    611         return res;
    612     }
    613 
    614     test_result_e simulate2(bool second)
    615     {
    616         debug_info info = $;
    617 
    618         current_iter_ = start_iteration_;
    619         for (; ; ++current_iter_)
    620         {
    621             rand_.seed(current_iter_);
    622 
    623             iteration(current_iter_);
    624 
    625             if (test_result_success != test_result_)
    626             {
    627                 params_.test_result = test_result_;
    628                 params_.stop_iteration = current_iter_;
    629                 if (params_.collect_history)
    630                     output_history();
    631                 return test_result_;
    632             }
    633 
    634             // If you hit an assert here, your test is probably non-deterministic.
    635             // Check whether you are using functions like ::rand(),
    636             // static variables, or object address values (for hashing) in your test.
    637             // Replace ::rand() with rl::rand() and eliminate the static variables.
    638             RL_VERIFY(second == false);
    639             (void)second;
    640 
    641             RL_HIST_CTX(user_event) {"ITERATION END"} RL_HIST_END();
    642 
    643             if (sched_.iteration_end())
    644                 break;
    645         }
    646 
    647         params_.test_result = test_result_success;
    648         params_.stop_iteration = current_iter_;
    649         return test_result_success;
    650     }
    651 
    652     RL_INLINE static void reset_thread(thread_info<thread_count>& ti)
    653     {
    654         foreach<thread_count>(
    655             ti.acquire_fence_order_,
    656             &assign_zero);
    657         foreach<thread_count>(
    658             ti.release_fence_order_,
    659             &assign_zero);
    660 
    661 #ifdef RL_IMPROVED_SEQ_CST_FENCE
    662         foreach<thread_count>(ti.imp_seq_cst_order_, &assign_zero);
    663 #endif
    664     }
    665 
    666     void iteration(iteration_t iter)
    667     {
    668         first_thread_ = true;
    669         disable_preemption_ = 0;
    670         sched_count_ = 0;
    671 
    672         foreach<thread_count>(
    673             threads_,
    674             &context_impl::reset_thread);
    675 
    676         foreach<thread_count>(
    677             seq_cst_fence_order_,
    678             &assign_zero);
    679 
    680         base_t::iteration_begin();
    681 
    682         for (thread_id_t i = 0; i != thread_count; ++i)
    683         {
    684             threads_[i].iteration_begin();
    685         }
    686 
    687         disable_alloc_ += 1;
    688         thread_id_t const th = sched_.iteration_begin(iter);
    689         disable_alloc_ -= 1;
    690         switch_to_fiber(th);
    691 
    692         if (0 == iter % progress_probe_period)
    693         {
    694             output_progress(iter);
    695         }
    696     }
    697 
    698 private:
    699     void switch_to_fiber(thread_id_t th)
    700     {
    701         fiber_t& prev = threadx_ ? threadx_->fiber_ : main_fiber_;
    702         threadx_ = &threads_[th];
    703         ::switch_to_fiber(threadx_->fiber_, prev);
    704     }
    705 
    706     void switch_to_main_fiber()
    707     {
    708         fiber_t& prev = threadx_->fiber_;
    709         threadx_ = 0;
    710         ::switch_to_fiber(main_fiber_, prev);
    711     }
    712 
    713     void output_progress(iteration_t iter)
    714     {
    715         iteration_t const total = sched_.iteration_count();
    716 
    717         if (0 == iter % (progress_probe_period * 16))
    718         {
    719             disable_alloc_ += 1;
    720             *params_.progress_stream << iter * 100 / total << "% ("
    721                 << iter << "/" << total << ")" << std::endl;
    722             disable_alloc_ -= 1;
    723         }
    724     }
    725 
    726     virtual unsigned rand(unsigned limit, sched_type t)
    727     {
    728         return sched_.rand(limit, t);
    729     }
    730 
    731     void output_history()
    732     {
    733         if (false == params_.output_history)
    734         {
    735             *params_.output_stream << test_result_str_ << std::endl;
    736             *params_.output_stream << "iteration: " << params_.stop_iteration << std::endl;
    737             *params_.output_stream << std::endl;
    738         }
    739         history_.print_exec_history(params_.output_history);
    740 
    741 #ifndef RL_GC
    742         if (test_result_memory_leak == test_result_)
    743         {
    744             memory_.output_allocs(*params_.output_stream);
    745         }
    746 #endif
    747 
    748         //!!! output other leaked resources
    749         if (test_result_ == test_result_resource_leak
    750             && atomic_alloc_->iteration_end() == false)
    751         {
    752             *params_.output_stream << "leaked atomics:" << std::endl;
    753             atomic_alloc_->output_allocs(*params_.output_stream);
    754         }
    755     }
    756 
    757     void rl_global_fence()
    758     {
    759         timestamp_t max_acq_rel = 0;
    760         for (thread_id_t i = 0; i != thread_count; ++i)
    761         {
    762             if (threads_[i].acq_rel_order_[i] > max_acq_rel)
    763                 max_acq_rel = threads_[i].acq_rel_order_[i];
    764         }
    765 
    766         for (thread_id_t i = 0; i != thread_count; ++i)
    767         {
    768             for (thread_id_t j = 0; j != thread_count; ++j)
    769             {
    770                 threads_[i].acq_rel_order_[j] = max_acq_rel;
    771             }
    772         }
    773     }
    774 
    775     virtual void atomic_thread_fence_acquire()
    776     {
    777         threadi().atomic_thread_fence_acquire();
    778     }
    779 
    780     virtual void atomic_thread_fence_release()
    781     {
    782         threadi().atomic_thread_fence_release();
    783     }
    784 
    785     virtual void atomic_thread_fence_acq_rel()
    786     {
    787         threadi().atomic_thread_fence_acq_rel();
    788     }
    789 
    790     virtual void atomic_thread_fence_seq_cst()
    791     {
    792         sched();
    793         threadi().atomic_thread_fence_seq_cst(seq_cst_fence_order_);
    794     }
    795 
    796     virtual thread_id_t get_thread_count() const
    797     {
    798         return thread_count;
    799     }
    800 
    801     virtual generic_mutex_data* mutex_ctor(bool is_rw, bool is_exclusive_recursive, bool is_shared_recursive, bool failing_try_lock)
    802     {
    803         return new (mutex_alloc_->alloc()) generic_mutex_data_impl<thread_count>(is_rw, is_exclusive_recursive, is_shared_recursive, failing_try_lock);
    804     }
    805 
    806     virtual void mutex_dtor(generic_mutex_data* m)
    807     {
    808         generic_mutex_data_impl<thread_count>* mm = static_cast<generic_mutex_data_impl<thread_count>*>(m);
    809         mm->~generic_mutex_data_impl<thread_count>();
    810         mutex_alloc_->free(mm);
    811     }
    812 
    813     virtual condvar_data* condvar_ctor(bool allow_spurious_wakeups)
    814     {
    815         return new (condvar_alloc_->alloc()) condvar_data_impl<thread_count>(allow_spurious_wakeups);
    816     }
    817 
    818     virtual void condvar_dtor(condvar_data* cv)
    819     {
    820         condvar_data_impl<thread_count>* mm = static_cast<condvar_data_impl<thread_count>*>(cv);
    821         mm->~condvar_data_impl<thread_count>();
    822         condvar_alloc_->free(mm);
    823     }
    824 
    825     virtual sema_data* sema_ctor(bool spurious_wakeups, unsigned initial_count, unsigned max_count)
    826     {
    827         return new (sema_alloc_->alloc()) sema_data_impl<thread_count>(spurious_wakeups, initial_count, max_count);
    828     }
    829 
    830     virtual void sema_dtor(sema_data* cv)
    831     {
    832         sema_data_impl<thread_count>* mm = static_cast<sema_data_impl<thread_count>*>(cv);
    833         mm->~sema_data_impl<thread_count>();
    834         sema_alloc_->free(mm);
    835     }
    836 
    837     virtual event_data* event_ctor(bool manual_reset, bool initial_state)
    838     {
    839         return new (event_alloc_->alloc()) event_data_impl<thread_count>(manual_reset, initial_state);
    840     }
    841 
    842     virtual void event_dtor(event_data* cv)
    843     {
    844         event_data_impl<thread_count>* mm = static_cast<event_data_impl<thread_count>*>(cv);
    845         mm->~event_data_impl<thread_count>();
    846         event_alloc_->free(mm);
    847     }
    848 
    849     context_impl(context_impl const&);
    850     context_impl& operator = (context_impl const&);
    851 };
    852 
    853 /*
    854 template<typename test_t, typename sched_t>
    855 struct thread_params_t
    856 {
    857     typedef context_impl<test_t, sched_t> context_t;
    858 
    859     //HANDLE                  handle;
    860     context_t*              ctx;
    861     ostringstream      oss;
    862     istringstream*     iss;
    863 
    864     //RL_NOCOPY(thread_params_t);
    865 };
    866 
    867 
    868 template<typename test_t, typename sched_t>
    869 unsigned __stdcall thread_func(void * ctx)
    870 {
    871     typedef thread_params_t<test_t, sched_t> params_t;
    872     params_t& p = *static_cast<params_t*>(ctx);
    873     p.ctx->simulate(p.oss, *p.iss, false);
    874     return 0;
    875 }
    876 */
    877 
    878 template<typename test_t, typename sched_t>
    879 test_result_e run_test(test_params& params, std::ostream& oss, bool second)
    880 {
    881     typedef context_impl<test_t, sched_t> context_t;
    882     typedef typename sched_t::shared_context_t shared_context_t;
    883     //typedef thread_params_t<test_t, sched_t> params_t;
    884 
    885     //bool destroy_persistent = false;
    886     //context_persistent<test_t, sched_t>* persistent = 0;
    887     //if (persistent_ptr == 0)
    888     //{
    889     //    persistent = new context_persistent<test_t, sched_t>;
    890     //    persistent_ptr = persistent;
    891     //}
    892     //else
    893     //{
    894     //    persistent = static_cast<context_persistent<test_t, sched_t>*>(persistent_ptr);
    895     //    destroy_persistent = true;
    896     //}
    897 
    898     shared_context_t sctx;
    899     test_result_e res;
    900 
    901     //if (second == false)
    902     {
    903         istringstream iss (params.initial_state);
    904         res = context_t(params, sctx).simulate(oss, iss, second);
    905     }
    906     //else
    907     //{
    908     //    size_t const thread_count = 2;
    909     //    vector<params_t*>::type threads (thread_count);
    910     //    for (size_t i = 0; i != thread_count; i += 1)
    911     //    {
    912     //        threads[i] = new params_t;
    913     //        threads[i]->iss = new istringstream(params.initial_state);
    914     //        threads[i]->ctx = new context_t(params, sctx);
    915     //        threads[i]->handle = (HANDLE)(_beginthreadex)(0, 0, &thread_func<test_t, sched_t>, threads[i], 0, 0);
    916     //    }
    917 
    918     //    for (size_t i = 0; i != thread_count; i += 1)
    919     //    {
    920     //        (WaitForSingleObject)(threads[i]->handle, (INFINITE));
    921     //    }
    922 
    923     //    for (size_t i = 0; i != thread_count; i += 1)
    924     //    {
    925     //        delete threads[i]->ctx;
    926     //        delete threads[i]->iss;
    927     //        delete threads[i];
    928     //    }
    929 
    930     //    return test_result_success;
    931     //}
    932 
    933     //if (destroy_persistent)
    934     //{
    935     //    delete persistent;
    936     //    persistent_ptr = 0;
    937     //}
    938 
    939     return res;
    940 }
    941 
    942 
    943 template<typename test_t>
    944 bool simulate(test_params& params)
    945 {
    946     char const* test_name = typeid(test_t).name();
    947     while (test_name[0] >= '0' && test_name[0] <= '9') // skip the numeric length prefix some ABIs prepend in typeid(...).name()
    948         test_name += 1;
    949     params.test_name = test_name;
    950     *params.output_stream << params.test_name << std::endl;
    951 
    952     unsigned start_time = get_tick_count();
    953 
    954     //void* persistent = 0;
    955 
    956     ostringstream oss;
    957     //istringstream iss (params.initial_state);
    958     test_result_e res = test_result_success;
    959     if (random_scheduler_type == params.search_type)
    960         res = run_test<test_t, random_scheduler<test_t::params::thread_count> >(params, oss, false);
    961     else if (fair_full_search_scheduler_type == params.search_type)
    962         res = run_test<test_t, full_search_scheduler<test_t::params::thread_count> >(params, oss, false);
    963     else if (fair_context_bound_scheduler_type == params.search_type)
    964         res = run_test<test_t, context_bound_scheduler<test_t::params::thread_count> >(params, oss, false);
    965     else
    966         RL_VERIFY(false);
    967 
    968     if (test_result_success == res)
    969     {
    970         unsigned t = get_tick_count() - start_time;
    971         if (0 == t)
    972             t = 1;
    973 
    974         *params.output_stream << "iterations: " << params.stop_iteration << std::endl;
    975         *params.output_stream << "total time: " << t << std::endl;
    976         *params.output_stream << "throughput: " << (uint64_t)params.stop_iteration * 1000 / t << std::endl;
    977         *params.output_stream << std::endl;
    978     }
    979     else if (false == params.output_history && false == params.collect_history)
    980     {
    981         ostringstream oss2;
    982         params.initial_state = oss.str();
    983         //istringstream iss2 (oss.str());
    984         params.collect_history = true;
    985         params.final_state = oss.str();
    986         iteration_t const stop_iter = params.stop_iteration;
    987         test_result_e res2 = test_result_success;
    988         if (random_scheduler_type == params.search_type)
    989             res2 = run_test<test_t, random_scheduler<test_t::params::thread_count> >(params, oss2, true);
    990         else if (fair_full_search_scheduler_type == params.search_type)
    991             res2 = run_test<test_t, full_search_scheduler<test_t::params::thread_count> >(params, oss2, true);
    992         else if (fair_context_bound_scheduler_type == params.search_type)
    993             res2 = run_test<test_t, context_bound_scheduler<test_t::params::thread_count> >(params, oss2, true);
    994         else
    995             RL_VERIFY(false);
    996 
    997         // If you hit an assert here, your test is probably non-deterministic.
    998         // Check whether you are using functions like ::rand(),
    999         // static variables, or object address values (for hashing) in your test.
   1000         // Replace ::rand() with rl::rand() and eliminate the static variables.
   1001         RL_VERIFY(res == res2);
   1002 
   1003         RL_VERIFY(params.stop_iteration == stop_iter);
   1004         (void)stop_iter;
   1005         (void)res2;
   1006     }
   1007     return test_t::params::expected_result == res;
   1008 }
   1009 
   1010 template<typename test_t>
   1011 bool simulate()
   1012 {
   1013     test_params params;
   1014     return simulate<test_t>(params);
   1015 }
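
A typical driver fills in test_params, selects one of the three scheduler types dispatched on above, and checks the boolean result (simulate() returns whether the outcome matches the suite's expected_result). A sketch using only names visible in this header, reusing the race_test suite sketched earlier:

    int main()
    {
        rl::test_params params;
        params.search_type = rl::fair_full_search_scheduler_type;
        // alternatives: rl::random_scheduler_type, rl::fair_context_bound_scheduler_type
        return rl::simulate<race_test>(params) ? 0 : 1;
    }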
   1016 
   1017 template<void(*func)(), size_t thread_count>
   1018 struct simulate_thunk : test_suite<simulate_thunk<func, thread_count>, 1>
   1019 {
   1020     static size_t const dynamic_thread_count = thread_count;
   1021     void thread(unsigned)
   1022     {
   1023         func();
   1024     }
   1025 };
   1026 
   1027 template<void(*func)(), size_t thread_count>
   1028 bool execute(test_params& params)
   1029 {
   1030     return simulate<simulate_thunk<func, thread_count> >(params);
   1031 }
   1032 
   1033 template<void(*func)(), size_t thread_count>
   1034 bool execute()
   1035 {
   1036     return simulate<simulate_thunk<func, thread_count> >();
   1037 }
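
execute<> adapts a free function to the suite machinery: simulate_thunk runs the function on a single static thread and sets dynamic_thread_count to the template argument, so the function body itself is expected to spawn the dynamic threads (via the create_thread path above). A sketch, with my_test_body as a hypothetical function:

    void my_test_body();                          // spawns up to 4 dynamic threads internally
    bool ok = rl::execute<&my_test_body, 4>();    // thread_count here sizes dynamic_thread_count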
   1038 
   1039 typedef bool (*simulate_f)(test_params&);
   1040 
   1041 
   1042 template<typename test_t, typename scheduler_t>
   1043 void context_impl<test_t, scheduler_t>::fiber_proc(void* thread_index)
   1044 {
   1045     ctx().fiber_proc_impl((int)(intptr_t)thread_index);
   1046 }
   1047 
   1048 template<typename type>
   1049 void dtor_arr_impl(void* pp)
   1050 {
   1051     type* p = (type*)((char*)pp + alignment);
   1052     size_t count = *(size_t*)pp;
   1053     for (size_t i = 0; i != count; ++i)
   1054     {
   1055        p->~type();
   1056        p += 1;
   1057     }
   1058 }
   1059 
   1060 template<typename type>
   1061 type* new_arr_impl(size_t count, rl::debug_info_param info)
   1062 {
   1063     RL_VERIFY(alignment >= sizeof(size_t));
   1064     context& c = ctx();
   1065 #ifndef RL_GC
   1066     void* mem = c.alloc(alignment + count * sizeof(type), true, info);
   1067 #else
   1068     void* mem = c.alloc(alignment + count * sizeof(type), true, &dtor_arr_impl<type>, info);
   1069 #endif
   1070     *(size_t*)mem = count;
   1071     size_t i = 0;
   1072     char* begin = (char*)mem + alignment;
   1073     char* pos = begin;
   1074     try
   1075     {
   1076         for (; i != count; ++i)
   1077         {
   1078             new (pos) type;
   1079             pos += sizeof(type);
   1080         }
   1081         return (type*)begin;
   1082     }
   1083     catch (...)
   1084     {
   1085         pos -= sizeof(type);
   1086         i -= 1;
   1087         for (; i < count; --i) // unsigned wrap-around of i ends the loop after element 0 is destroyed
   1088         {
   1089             ((type*)pos)->~type();
   1090             pos -= sizeof(type);
   1091         }
   1092         ctx().free(mem, true, info);
   1093         throw;
   1094     }
   1095 }
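
new_arr_impl keeps the element count in a header of `alignment` bytes placed directly in front of the array; dtor_arr_impl and delete_arr_impl recover it from the same offset. The resulting layout of one array allocation:

    // [ count : size_t, padded to `alignment` bytes ][ elem 0 ][ elem 1 ] ... [ elem count-1 ]
    //                                                ^ pointer returned to the caller
    // (hence the RL_VERIFY(alignment >= sizeof(size_t)) guard above)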
   1096 
   1097 template<typename type>
   1098 void delete_arr_impl(type* p, debug_info_param info)
   1099 {
   1100     if (p == 0)
   1101         return;
   1102     context& c = ctx();
   1103     char* begin = (char*)p - alignment;
   1104     size_t count = *(size_t*)begin;
   1105     for (size_t i = 0; i != count; ++i)
   1106     {
   1107        p->~type();
   1108        p += 1;
   1109     }
   1110     c.free(begin, true, info);
   1111 }
   1112 
   1113 template<typename type>
   1114 void delete_impl(type* p, debug_info_param info)
   1115 {
   1116     p->~type();
   1117     ctx().free(p, false, info);
   1118 }
   1119 
   1120 template<typename type>
   1121 void dtor_impl(void* p)
   1122 {
   1123     static_cast<type*>(p)->~type();
   1124 }
   1125 
   1126 inline unsigned rand(unsigned limit)
   1127 {
   1128     return ctx().rand(limit, sched_type_user);
   1129 }
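
This is the replacement for ::rand() that the determinism comments in simulate2() and simulate() call for: it draws from the scheduler's stream, so a failing interleaving replays identically. For example, inside a thread() body:

    unsigned r = rl::rand(4); // uniform in [0, 4); identical sequence when the iteration is replayed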
   1130 
   1131 inline unsigned thread_index()
   1132 {
   1133     return ctx().threadx_->index_;
   1134 }
   1135 
   1136 
   1137 struct new_proxy
   1138 {
   1139     debug_info info;
   1140     new_proxy(debug_info_param info)
   1141         : info(info)
   1142     {
   1143         //printf(__FUNCSIG__ "\n");
   1144     }
   1145 
   1146     template<typename T>
   1147     T* operator % (T* p)
   1148     {
   1149         context& c = ctx();
   1150         size_t sz = c.prev_alloc_size();
   1151         if (sz)
   1152         {
   1153             RL_HIST(memory_alloc_event) {p, sz, false} RL_HIST_END();
   1154         }
   1155         return p;
   1156     }
   1157 };
   1158 
   1159 struct delete_proxy
   1160 {
   1161     //debug_info info_;
   1162     delete_proxy(debug_info_param info)
   1163         //: info_(info)
   1164     {
   1165         ctx().set_debug_info(info);
   1166         //printf(__FUNCSIG__ "\n");
   1167     }
   1168 };
   1169 
   1170 inline void* rl_malloc(size_t sz, debug_info_param info)
   1171 {
   1172     return ctx().alloc(sz, false, info);
   1173 }
   1174 
   1175 inline void* rl_calloc(size_t sz, size_t cnt, debug_info_param info)
   1176 {
   1177     void* p = ctx().alloc(sz * cnt, false, info);
   1178     memset(p, 0, sz * cnt);
   1179     return p;
   1180 }
   1181 
   1182 inline void* realloc(void* p, size_t sz, debug_info_param info)
   1183 {
   1184     if (sz == 0)
   1185     {
   1186         ctx().free(p, false, info);
   1187         return 0;
   1188     }
   1189     else
   1190     {
   1191         void* pp = ctx().alloc(sz, false, info);
   1192         memcpy(pp, p, sz); //!!! copies sz bytes; may read past the old block when growing (the old size is not tracked here)
   1193         ctx().free(p, false, info);
   1194         return pp;
   1195     }
   1196 }
   1197 
   1198 inline void rl_free(void* p, debug_info_param info)
   1199 {
   1200     ctx().free(p, false, info);
   1201 }
   1202 
   1203 inline size_t hash_ptr(void const* p, size_t size)
   1204 {
   1205     return ctx().get_addr_hash(p) % size;
   1206 }
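
hash_ptr is the companion fix for address hashing (also flagged in the determinism comments): it hashes through the context's address hash rather than the raw pointer value, which differs from run to run. A sketch, with node and table_size as illustrative names:

    size_t bucket = rl::hash_ptr(node, table_size); // intended to give a stable bucket choice when an execution is replayed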
   1207 
   1208 inline void systemwide_fence(debug_info_param info)
   1209 {
   1210     context& c = ctx();
   1211     RL_HIST(user_msg_event) {"system-wide fence"} RL_HIST_END();
   1212     c.rl_global_fence();
   1213 }
   1214 
   1215 } // namespace rl
   1216 
   1217 
   1218 #ifndef RL_GC
   1219 inline void* operator new (size_t size, rl::debug_info_param info)
   1220 {
   1221     return rl::ctx().alloc(size, false, info);
   1222 }
   1223 
   1224 inline void* operator new [] (size_t size, rl::debug_info_param info)
   1225 {
   1226     return rl::ctx().alloc(size, false, info);
   1227 }
   1228 
   1229 inline void operator delete (void* p, rl::debug_info_param info)
   1230 {
   1231     rl::ctx().free(p, false, info);
   1232 }
   1233 
   1234 inline void operator delete [] (void* p, rl::debug_info_param info)
   1235 {
   1236     rl::ctx().free(p, false, info);
   1237 }
   1238 #endif
   1239 
   1240 
   1241 
   1242 #ifdef RL_GC
   1243 inline void* operator new (size_t size, void(*dtor)(void*), rl::debug_info_param info)
   1244 {
   1245     return rl::ctx().alloc(size, false, dtor, info);
   1246 }
   1247 
   1248 inline void operator delete (void* p, void(*dtor)(void*), rl::debug_info_param info)
   1249 {
   1250     (void)p;
   1251     (void)dtor;
   1252     (void)info;
   1253 }
   1254 #endif
   1255 
   1256 inline void* operator new (size_t size) RL_THROW_SPEC(std::bad_alloc)
   1257 {
   1258     if (&rl::ctx()) // non-null only while a simulation context is active
   1259         return rl::ctx().alloc(size);
   1260     else
   1261         return (::malloc)(size);
   1262 }
   1263 
   1264 inline void* operator new [] (size_t size) RL_THROW_SPEC(std::bad_alloc)
   1265 {
   1266     if (&rl::ctx())
   1267         return rl::ctx().alloc(size);
   1268     else
   1269         return (::malloc)(size);
   1270 }
   1271 
   1272 inline void operator delete (void* p) throw()
   1273 {
   1274     if (&rl::ctx())
   1275         rl::ctx().free(p);
   1276     else
   1277         (::free)(p);
   1278 }
   1279 
   1280 inline void operator delete [] (void* p) throw()
   1281 {
   1282     if (&rl::ctx())
   1283         rl::ctx().free(p);
   1284     else
   1285         (::free)(p);
   1286 }
   1287 
   1288 #define RL_NEW_PROXY rl::new_proxy($) % new
   1289 #define RL_DELETE_PROXY rl::delete_proxy($) , delete
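
These macros splice the proxies around ordinary new/delete expressions so the call-site debug info ($) reaches the instrumented allocator. How the expansions evaluate, with node as an illustrative type:

    node* n = RL_NEW_PROXY node (42);
    // -> rl::new_proxy($) % new node (42)
    //    the instrumented global operator new above allocates and records the size;
    //    new_proxy::operator% then logs a memory_alloc_event for the returned pointer

    RL_DELETE_PROXY n;
    // -> rl::delete_proxy($) , delete n
    //    the comma operator runs the delete_proxy constructor first (stashing the
    //    debug info via set_debug_info()), then the instrumented operator delete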
   1290 
   1291 #endif