medfall

A super great game engine

mutex.hpp (20631B)


/*  Relacy Race Detector
 *  Copyright (c) 2008-2013, Dmitry S. Vyukov
 *  All rights reserved.
 *  This software is provided AS-IS with no warranty, either express or implied.
 *  This software is distributed under a license and may not be copied,
 *  modified or distributed except as expressly authorized under the
 *  terms of the license contained in the file LICENSE in this distribution.
 */

#ifndef RL_MUTEX_HPP
#define RL_MUTEX_HPP
#ifdef _MSC_VER
#   pragma once
#endif

#include "../base.hpp"
#include "../context.hpp"
#include "../thread.hpp"
#include "../atomic.hpp"
#include "../waitset.hpp"
#include "../signature.hpp"
#include "../sync_var.hpp"
#include "../foreach.hpp"
#include "semaphore.hpp"


namespace rl
{

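// This header models mutexes for Relacy's simulated scheduler. A single
// generic implementation backs plain, recursive, and reader-writer mutexes,
// and usage errors (recursive locking of a non-recursive mutex, unlocking
// without ownership, read-to-write upgrades, destroying an owned mutex) are
// reported as test failures instead of silently exhibiting undefined
// behavior.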
struct generic_mutex_data : nocopy<>
{
    virtual bool lock_exclusive(bool is_timed, debug_info_param info) = 0;
    virtual bool try_lock_exclusive(debug_info_param info) = 0;
    virtual void unlock_exclusive(debug_info_param info) = 0;
    virtual void lock_shared(debug_info_param info) = 0;
    virtual bool try_lock_shared(debug_info_param info) = 0;
    virtual void unlock_shared(debug_info_param info) = 0;
    virtual void unlock_exclusive_or_shared(debug_info_param info) = 0;
    virtual bool is_signaled(debug_info_param info) = 0;
    virtual void memory_acquire(debug_info_param info) = 0;
    virtual void* prepare_wait(debug_info_param info) = 0;
    virtual ~generic_mutex_data() {} // just to calm down gcc
};

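// Concrete mutex model, parameterized by the simulated thread count. It
// tracks the exclusive owner, per-thread shared ownership counts, and two
// wait sets (one for writers, one for readers), and logs an event_t into
// the execution history for every operation.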
template<thread_id_t thread_count>
class generic_mutex_data_impl : public generic_mutex_data
{
public:
    struct event_t
    {
        enum type_e
        {
            type_lock,
            type_unlock,
            type_recursive_lock,
            type_recursive_unlock,
            type_failed_try_lock,
            type_spuriously_failed_try_lock,
            type_lock_shared,
            type_unlock_shared,
            type_recursive_lock_shared,
            type_recursive_unlock_shared,
            type_failed_try_lock_shared,
            type_spuriously_failed_try_lock_shared,
            type_wait,
            type_destroying_owned_mutex,
        };

        generic_mutex_data_impl const* var_addr_;
        type_e type_;

        void output(std::ostream& s) const
        {
            s << "<" << std::hex << var_addr_ << std::dec << "> mutex: ";
            switch (type_)
            {
            case type_lock: s << "exclusive lock"; break;
            case type_unlock: s << "exclusive unlock"; break;
            case type_recursive_lock: s << "recursive exclusive lock"; break;
            case type_recursive_unlock: s << "recursive exclusive unlock"; break;
            case type_failed_try_lock: s << "failed exclusive try lock"; break;
            case type_spuriously_failed_try_lock: s << "spuriously failed exclusive try lock"; break;
            case type_lock_shared: s << "shared lock"; break;
            case type_unlock_shared: s << "shared unlock"; break;
            case type_recursive_lock_shared: s << "recursive shared lock"; break;
            case type_recursive_unlock_shared: s << "recursive shared unlock"; break;
            case type_failed_try_lock_shared: s << "failed shared try lock"; break;
            case type_spuriously_failed_try_lock_shared: s << "spuriously failed shared try lock"; break;
            case type_wait: s << "blocking"; break;
            case type_destroying_owned_mutex: s << "destroying owned mutex"; break;
            }
        }
    };
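    // These events are recorded via RL_HIST on every lock, unlock, and
    // wait, and show up in the interleaving trace Relacy prints when a
    // test fails.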

    generic_mutex_data_impl(bool is_rw, bool is_exclusive_recursive, bool is_shared_recursive, bool failing_try_lock)
        : is_rw_(is_rw)
        , is_exclusive_recursive_(is_exclusive_recursive)
        , is_shared_recursive_(is_shared_recursive)
        , failing_try_lock_(failing_try_lock)
        , exclusive_owner_(state_free)
        , exclusive_recursion_count_(0)
        , shared_lock_count_(0)
        , try_lock_failed_()
    {
        context& c = ctx();
        (void)c;
        RL_VERIFY(false == c.invariant_executing);
        foreach<thread_count>(shared_owner_, &assign_zero);
    }

    ~generic_mutex_data_impl()
    {
        context& c = ctx();
        RL_VERIFY(false == c.invariant_executing);
        if (exclusive_owner_ != state_free
            || exclusive_waitset_
            || shared_waitset_)
        {
            debug_info info = $;
            RL_HIST(event_t) {this, event_t::type_destroying_owned_mutex} RL_HIST_END();
            RL_ASSERT_IMPL(false, test_result_destroying_owned_mutex, "", $);
        }
    }

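    // Exclusive lock. Detects read-to-write upgrades and recursion on a
    // non-recursive mutex, then either acquires the mutex's sync_var
    // (modeling a real lock's acquire semantics) or parks the thread on
    // the writers' wait set until an unlock hands the mutex over; a timed
    // wait that expires returns false.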
    virtual bool lock_exclusive(bool is_timed, debug_info_param info)
    {
        context& c = ctx();
        c.sched();
        sign_.check(info);
        RL_VERIFY(false == c.invariant_executing);

        thread_id_t const my_id = c.threadx_->index_;

        if (exclusive_owner_ == state_shared && shared_owner_[my_id])
        {
            RL_HIST(event_t) {this, event_t::type_lock} RL_HIST_END();
            RL_ASSERT_IMPL(false, test_result_mutex_read_to_write_upgrade, "", info);
        }

        if (exclusive_owner_ == my_id)
        {
            RL_HIST(event_t) {this, event_t::type_recursive_lock} RL_HIST_END();
            if (is_exclusive_recursive_)
            {
                exclusive_recursion_count_ += 1;
                return true;
            }
            else
            {
                RL_ASSERT_IMPL(false, test_result_recursion_on_nonrecursive_mutex, "", info);
            }
        }

        for (;;)
        {
            if (exclusive_owner_ == state_free)
            {
                RL_VERIFY(exclusive_recursion_count_ == 0);
                //!!! in some implementations this must be acq_rel
                sync_.acquire(c.threadx_);
                exclusive_recursion_count_ = 1;
                exclusive_owner_ = my_id;
                RL_HIST(event_t) {this, event_t::type_lock} RL_HIST_END();
                return true;
            }
            else
            {
                RL_VERIFY(my_id != exclusive_owner_);
                RL_HIST(event_t) {this, event_t::type_wait} RL_HIST_END();
                unpark_reason reason = exclusive_waitset_.park_current(c, is_timed, false, false, info);
                RL_VERIFY(reason != unpark_reason_spurious);
                if (reason == unpark_reason_timeout)
                {
                    sync_.acquire(c.threadx_);
                    return false;
                }
            }

            //??? c.sched();
            //sign_.check(info);
        }
    }

    virtual bool try_lock_exclusive(debug_info_param info)
    {
        context& c = ctx();
        c.sched();
        sign_.check(info);
        RL_VERIFY(false == c.invariant_executing);

        thread_id_t const my_id = c.threadx_->index_;

        if (exclusive_owner_ == state_shared && shared_owner_[my_id])
        {
            RL_HIST(event_t) {this, event_t::type_lock} RL_HIST_END();
            RL_ASSERT_IMPL(false, test_result_mutex_read_to_write_upgrade, "", info);
        }

        if (exclusive_owner_ == my_id)
        {
            RL_HIST(event_t) {this, event_t::type_recursive_lock} RL_HIST_END();
            if (is_exclusive_recursive_)
            {
                exclusive_recursion_count_ += 1;
                return true;
            }
            else
            {
                RL_ASSERT_IMPL(false, test_result_recursion_on_nonrecursive_mutex, "", info);
            }
        }

        if (exclusive_owner_ == state_free)
        {
            RL_VERIFY(exclusive_recursion_count_ == 0);
            //!!! probability rand
            if (true == failing_try_lock_
                && false == try_lock_failed_
                && c.rand(2, sched_type_user))
            {
                try_lock_failed_ = true;
                RL_HIST(event_t) {this, event_t::type_spuriously_failed_try_lock} RL_HIST_END();
                return false;
            }
            else
            {
                sync_.acquire(c.threadx_);
                exclusive_recursion_count_ = 1;
                exclusive_owner_ = my_id;
                RL_HIST(event_t) {this, event_t::type_lock} RL_HIST_END();
                return true;
            }
        }
        else
        {
            //!!! in some implementations there must be an acquire here
            //sync_.acquire(c.threadx_);

            RL_VERIFY(my_id != exclusive_owner_);
            RL_HIST(event_t) {this, event_t::type_failed_try_lock} RL_HIST_END();
            return false;
        }
    }

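    // Exclusive unlock. Verifies ownership and releases the sync_var
    // (modeling a real unlock's release semantics), then prefers writers:
    // it wakes one parked writer, and only if none is waiting wakes all
    // parked readers.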
    virtual void unlock_exclusive(debug_info_param info)
    {
        context& c = ctx();
        c.sched();
        sign_.check(info);
        RL_VERIFY(false == c.invariant_executing);

        thread_id_t const my_id = c.threadx_->index_;

        if (exclusive_owner_ != my_id)
        {
            RL_HIST(event_t) {this, event_t::type_unlock} RL_HIST_END();
            RL_ASSERT_IMPL(false, test_result_unlocking_mutex_wo_ownership, "", info);
        }

        exclusive_recursion_count_ -= 1;
        if (exclusive_recursion_count_)
        {
            RL_VERIFY(is_exclusive_recursive_);
            RL_HIST(event_t) {this, event_t::type_recursive_unlock} RL_HIST_END();
            return;
        }

        sync_.release(c.threadx_);
        exclusive_owner_ = state_free;
        RL_VERIFY(exclusive_recursion_count_ == 0);

        if (false == exclusive_waitset_.unpark_one(c, info))
            shared_waitset_.unpark_all(c, info);

        RL_HIST(event_t) {this, event_t::type_unlock} RL_HIST_END();
    }

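    // Shared lock. Granted when the mutex is free, or already held in
    // shared mode with no writer queued; checking exclusive_waitset_ keeps
    // a stream of readers from starving a waiting writer.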
    virtual void lock_shared(debug_info_param info)
    {
        RL_VERIFY(is_rw_);
        context& c = ctx();
        c.sched();
        sign_.check(info);
        RL_VERIFY(false == c.invariant_executing);

        thread_id_t const my_id = c.threadx_->index_;

        if (exclusive_owner_ == my_id)
        {
            RL_HIST(event_t) {this, event_t::type_lock_shared} RL_HIST_END();
            RL_ASSERT_IMPL(false, test_result_mutex_write_to_read_upgrade, "", info);
        }

        if (exclusive_owner_ == state_shared && shared_owner_[my_id])
        {
            RL_HIST(event_t) {this, event_t::type_recursive_lock_shared} RL_HIST_END();
            if (is_shared_recursive_)
            {
                shared_owner_[my_id] += 1;
                shared_lock_count_ += 1;
                return;
            }
            else
            {
                RL_ASSERT_IMPL(false, test_result_recursion_on_nonrecursive_mutex, "", info);
            }
        }

        for (;;)
        {
            if ((exclusive_owner_ == state_free)
                || (exclusive_owner_ == state_shared
                    && false == exclusive_waitset_))
            {
                sync_.acquire(c.threadx_);
                shared_owner_[my_id] += 1;
                shared_lock_count_ += 1;
                exclusive_owner_ = state_shared;
                RL_HIST(event_t) {this, event_t::type_lock_shared} RL_HIST_END();
                break;
            }
            else
            {
                RL_VERIFY(my_id != exclusive_owner_);
                RL_HIST(event_t) {this, event_t::type_wait} RL_HIST_END();
                shared_waitset_.park_current(c, false, false, false, info);
            }

            //??? c.sched();
            //sign_.check(info);
        }
    }

    virtual bool try_lock_shared(debug_info_param info)
    {
        RL_VERIFY(is_rw_);
        context& c = ctx();
        c.sched();
        sign_.check(info);
        RL_VERIFY(false == c.invariant_executing);

        thread_id_t const my_id = c.threadx_->index_;

        if (exclusive_owner_ == my_id)
        {
            RL_HIST(event_t) {this, event_t::type_lock_shared} RL_HIST_END();
            RL_ASSERT_IMPL(false, test_result_mutex_write_to_read_upgrade, "", info);
        }

        if (exclusive_owner_ == state_shared && shared_owner_[my_id])
        {
            RL_HIST(event_t) {this, event_t::type_recursive_lock_shared} RL_HIST_END();
            if (is_shared_recursive_)
            {
                shared_owner_[my_id] += 1;
                shared_lock_count_ += 1;
                return true;
            }
            else
            {
                RL_ASSERT_IMPL(false, test_result_recursion_on_nonrecursive_mutex, "", info);
            }
        }

        if ((exclusive_owner_ == state_free)
            || (exclusive_owner_ == state_shared
                && false == exclusive_waitset_))
        {
            //!!! probability rand
            if (true == failing_try_lock_
                && false == try_lock_failed_
                && c.rand(2, sched_type_user))
            {
                try_lock_failed_ = true;
                RL_HIST(event_t) {this, event_t::type_spuriously_failed_try_lock_shared} RL_HIST_END();
                return false;
            }
            else
            {
                sync_.acquire(c.threadx_);
                shared_owner_[my_id] += 1;
                shared_lock_count_ += 1;
                exclusive_owner_ = state_shared;
                RL_HIST(event_t) {this, event_t::type_lock_shared} RL_HIST_END();
                return true;
            }
        }
        else
        {
            RL_VERIFY(my_id != exclusive_owner_);
            RL_HIST(event_t) {this, event_t::type_failed_try_lock_shared} RL_HIST_END();
            return false;
        }
    }

    virtual void unlock_shared(debug_info_param info)
    {
        RL_VERIFY(is_rw_);
        context& c = ctx();
        c.sched();
        sign_.check(info);
        RL_VERIFY(false == c.invariant_executing);

        thread_id_t const my_id = c.threadx_->index_;

        if (exclusive_owner_ != state_shared || 0 == shared_owner_[my_id])
        {
            RL_HIST(event_t) {this, event_t::type_unlock_shared} RL_HIST_END();
            RL_ASSERT_IMPL(false, test_result_unlocking_mutex_wo_ownership, "", info);
        }

        RL_VERIFY(shared_lock_count_);
        shared_owner_[my_id] -= 1;
        shared_lock_count_ -= 1;
        if (shared_lock_count_ != 0)
        {
            if (shared_owner_[my_id])
            {
                RL_VERIFY(is_shared_recursive_);
                RL_HIST(event_t) {this, event_t::type_recursive_unlock_shared} RL_HIST_END();
            }
            else
            {
                sync_.release(c.threadx_);
                RL_HIST(event_t) {this, event_t::type_unlock_shared} RL_HIST_END();
            }
            return;
        }

        sync_.release(c.threadx_);
        exclusive_owner_ = state_free;

        exclusive_waitset_.unpark_one(c, info);

        RL_HIST(event_t) {this, event_t::type_unlock_shared} RL_HIST_END();
    }

    virtual void unlock_exclusive_or_shared(debug_info_param info)
    {
        if (exclusive_owner_ == ctx().threadx_->index_)
            unlock_exclusive(info);
        else
            unlock_shared(info);
    }

    virtual bool is_signaled(debug_info_param info)
    {
        (void)info;
        return (exclusive_owner_ == state_free);
    }

    virtual void memory_acquire(debug_info_param info)
    {
        (void)info;
        sync_.acquire(ctx().threadx_);
    }

    virtual void* prepare_wait(debug_info_param info)
    {
        (void)info;
        return &exclusive_waitset_;
    }

private:
    static thread_id_t const state_shared = (thread_id_t)-1;
    static thread_id_t const state_free = (thread_id_t)-2;

    signature<0xbabaf1f1> sign_;
    bool is_rw_;
    bool is_exclusive_recursive_;
    bool is_shared_recursive_;
    bool failing_try_lock_;
    sync_var<thread_count> sync_;
    thread_id_t exclusive_owner_;
    unsigned exclusive_recursion_count_;
    waitset<thread_count> exclusive_waitset_;
    waitset<thread_count> shared_waitset_;
    timestamp_t shared_owner_ [thread_count];
    unsigned shared_lock_count_;
    bool try_lock_failed_;

    RL_NOCOPY(generic_mutex_data_impl);
};

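// User-facing mutex proxy. The underlying model object is created via
// context::mutex_ctor() in init() rather than in the constructor, and the
// copy constructor and assignment operator intentionally do not copy
// impl_, so copying an object that embeds a mutex never shares model
// state. Deriving from win_waitable_object lets a mutex take part in the
// emulated Win32 wait functions via wait()/signal()/is_signaled() below.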
template<typename type>
class generic_mutex : public win_waitable_object
{
public:
    generic_mutex()
        : impl_()
    {
    }

    generic_mutex(generic_mutex const&)
        : impl_()
    {
    }

    generic_mutex& operator = (generic_mutex const&)
    {
        return *this;
    }

    ~generic_mutex()
    {
    }

    void init(bool is_rw, bool is_exclusive_recursive, bool is_shared_recursive, bool failing_try_lock, debug_info_param info)
    {
        context& c = ctx();
        RL_ASSERT_IMPL(0 == impl_, test_result_double_initialization_of_mutex, "", info);
        sign_.check(info);
        impl_ = c.mutex_ctor(is_rw, is_exclusive_recursive, is_shared_recursive, failing_try_lock);
    }

    void deinit(debug_info_param info)
    {
        context& c = ctx();
        check(info);
        c.mutex_dtor(impl_);
        impl_ = 0;
    }

    void lock(debug_info_param info)
    {
        lock_exclusive(info);
    }

    bool lock_exclusive_timed(debug_info_param info)
    {
        return check(info)->lock_exclusive(true, info);
    }

    void unlock(debug_info_param info)
    {
        unlock_exclusive(info);
    }

    void lock_exclusive(debug_info_param info)
    {
        check(info)->lock_exclusive(false, info);
    }

    bool try_lock_exclusive(debug_info_param info)
    {
        return check(info)->try_lock_exclusive(info);
    }

    void unlock_exclusive(debug_info_param info)
    {
        check(info)->unlock_exclusive(info);
    }

    void lock_shared(debug_info_param info)
    {
        check(info)->lock_shared(info);
    }

    bool try_lock_shared(debug_info_param info)
    {
        return check(info)->try_lock_shared(info);
    }

    void unlock_shared(debug_info_param info)
    {
        check(info)->unlock_shared(info);
    }

    void unlock_exclusive_or_shared(debug_info_param info)
    {
        check(info)->unlock_exclusive_or_shared(info);
    }

private:
    generic_mutex_data* impl_;
    signature<0x6A6cB03A> sign_;

    generic_mutex_data* check(debug_info_param info)
    {
        RL_ASSERT_IMPL(impl_, test_result_usage_of_non_initialized_mutex, "", info);
        sign_.check(info);
        return impl_;
    }

    virtual sema_wakeup_reason wait(bool try_wait, bool is_timed, debug_info_param info)
    {
        if (try_wait)
        {
            if (check(info)->try_lock_exclusive(info))
                return sema_wakeup_reason_success;
            else
                return sema_wakeup_reason_failed;
        }
        else
        {
            if (check(info)->lock_exclusive(is_timed, info))
                return sema_wakeup_reason_success;
            else
                return sema_wakeup_reason_timeout;
        }
    }

    virtual bool signal(debug_info_param info)
    {
        check(info)->unlock_exclusive(info);
        return true;
    }

    virtual bool is_signaled(debug_info_param info)
    {
        return check(info)->is_signaled(info);
    }

    virtual void memory_acquire(debug_info_param info)
    {
        check(info)->memory_acquire(info);
    }

    virtual void* prepare_wait(debug_info_param info)
    {
        return check(info)->prepare_wait(info);
    }
};

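// std::mutex-style adapter. The tag parameter exists only to make each
// standard mutex flavor a distinct type; is_recursive selects recursive
// semantics in init().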
template<typename tag, bool is_recursive>
class std_generic_mutex : generic_mutex<tag>, nocopy<>
{
public:
    std_generic_mutex()
    {
        generic_mutex<tag>::init(false, is_recursive, false, true, $);
    }

    ~std_generic_mutex()
    {
        generic_mutex<tag>::deinit($);
    }

    void lock(debug_info_param info)
    {
        generic_mutex<tag>::lock_exclusive(info);
    }

    bool try_lock(debug_info_param info)
    {
        return generic_mutex<tag>::try_lock_exclusive(info);
    }

    void unlock(debug_info_param info)
    {
        generic_mutex<tag>::unlock_exclusive(info);
    }
};


struct mutex_tag_std;
typedef std_generic_mutex<mutex_tag_std, false> mutex;

struct mutex_tag_std_recursive;
typedef std_generic_mutex<mutex_tag_std_recursive, true> recursive_mutex;
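
// Usage sketch (illustrative only, not part of this header). With the rest
// of the Relacy harness (rl::test_suite<>, rl::simulate<>, the VAR_T/VAR
// wrappers, and the $ debug-info macro, all assumed from the wider
// distribution), a mutex-protected counter test could look like:
//
//     struct mutex_test : rl::test_suite<mutex_test, 2>
//     {
//         rl::mutex m;
//         VAR_T(int) counter;
//
//         void before() { VAR(counter) = 0; }
//
//         void thread(unsigned /*index*/)
//         {
//             m.lock($);         // forwards to lock_exclusive()
//             VAR(counter) += 1; // protected access; a race here would be reported
//             m.unlock($);       // forwards to unlock_exclusive()
//         }
//     };
//
//     int main() { rl::simulate<mutex_test>(); }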

}

#endif