medfall

A super great game engine

atomic.hpp (24645B)


/*  Relacy Race Detector
 *  Copyright (c) 2008-2013, Dmitry S. Vyukov
 *  All rights reserved.
 *  This software is provided AS-IS with no warranty, either express or implied.
 *  This software is distributed under a license and may not be copied,
 *  modified or distributed except as expressly authorized under the
 *  terms of the license contained in the file LICENSE in this distribution.
 */

#ifndef RL_ATOMIC_HPP
#define RL_ATOMIC_HPP
#ifdef _MSC_VER
#   pragma once
#endif

#include "base.hpp"
#include "context.hpp"
#include "memory_order.hpp"
#include "signature.hpp"
#include "atomic_events.hpp"
#include "waitset.hpp"
#include "rmw.hpp"


namespace rl
{


template<typename T>
class atomic;

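// Compile-time boolean tag; used below to pick between the weak
// (spurious-failure-allowed) and strong flavors of compare_exchange.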
template<bool> struct bool_t {};


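// Read-only access proxy. Test code never touches an atomic variable
// directly: each access goes through a short-lived proxy that carries the
// debug_info (source location) of the access for history and error reports.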
template<typename T>
class atomic_proxy_const
{
public:
    atomic_proxy_const(atomic<T> const /*volatile*/& var, debug_info_param info)
        : var_(const_cast<atomic<T>&>(var))
        , info_(info)
    {
    }

    T load(memory_order mo = mo_seq_cst) const
    {
        return var_.load(mo, info_);
    }

    operator T () const
    {
        return load();
    }

protected:
    atomic<T>& var_;
    debug_info info_;

    atomic_proxy_const& operator = (atomic_proxy_const const&);
};


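// Mutable access proxy: extends the read-only proxy with store,
// compare-exchange and fetch-and-modify operations plus the usual operator
// shorthands, all forwarded to the underlying atomic<> together with the
// captured debug_info.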
template<typename T>
class atomic_proxy : public atomic_proxy_const<T>
{
public:
    typedef typename atomic_add_type<T>::type add_type;

    atomic_proxy(atomic<T> /*volatile*/& var, debug_info_param info)
        : atomic_proxy_const<T>(var, info)
    {
    }

    void store(T value, memory_order mo = mo_seq_cst)
    {
        this->var_.store(value, mo, this->info_);
    }

    bool compare_exchange_weak(T& cmp, T xchg, memory_order mo = mo_seq_cst)
    {
        return this->var_.compare_exchange(bool_t<true>(), cmp, xchg, mo, this->info_);
    }

    bool compare_exchange_weak(T& cmp, T xchg, memory_order mo, memory_order failure_mo)
    {
        return this->var_.compare_exchange(bool_t<true>(), cmp, xchg, mo, failure_mo, this->info_);
    }

    bool compare_exchange_strong(T& cmp, T xchg, memory_order mo = mo_seq_cst)
    {
        return this->var_.compare_exchange(bool_t<false>(), cmp, xchg, mo, this->info_);
    }

    bool compare_exchange_strong(T& cmp, T xchg, memory_order mo, memory_order failure_mo)
    {
        return this->var_.compare_exchange(bool_t<false>(), cmp, xchg, mo, failure_mo, this->info_);
    }

    T exchange(T xchg, memory_order mo = mo_seq_cst)
    {
        return this->var_.rmw(rmw_type_t<rmw_type_swap>(), xchg, mo, this->info_);
    }

    T fetch_add(add_type value, memory_order mo = mo_seq_cst)
    {
        return this->var_.rmw(rmw_type_t<rmw_type_add>(), value, mo, this->info_);
    }

    T fetch_sub(add_type value, memory_order mo = mo_seq_cst)
    {
        return this->var_.rmw(rmw_type_t<rmw_type_sub>(), value, mo, this->info_);
    }

    T fetch_and(T value, memory_order mo = mo_seq_cst)
    {
        return this->var_.rmw(rmw_type_t<rmw_type_and>(), value, mo, this->info_);
    }

    T fetch_or(T value, memory_order mo = mo_seq_cst)
    {
        return this->var_.rmw(rmw_type_t<rmw_type_or>(), value, mo, this->info_);
    }

    T fetch_xor(T value, memory_order mo = mo_seq_cst)
    {
        return this->var_.rmw(rmw_type_t<rmw_type_xor>(), value, mo, this->info_);
    }

    T operator = (T value)
    {
        store(value);
        return value;
    }

    T operator ++ (int)
    {
        return fetch_add(1);
    }

    T operator -- (int)
    {
        return fetch_sub(1);
    }

    T operator ++ ()
    {
        return fetch_add(1) + 1;
    }

    T operator -- ()
    {
        return fetch_sub(1) - 1;
    }

    T operator += (add_type value)
    {
        return fetch_add(value) + value;
    }
    T operator -= (add_type value)
    {
        // fetch_sub returns the old value; the new value is old - value.
        return fetch_sub(value) - value;
    }

    T operator &= (T value)
    {
        return fetch_and(value) & value;
    }

    T operator |= (T value)
    {
        return fetch_or(value) | value;
    }

    T operator ^= (T value)
    {
        return fetch_xor(value) ^ value;
    }
};
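// A minimal usage sketch (assuming Relacy's usual `$` macro, which expands
// to the debug_info for the current source location): atomic<T>::operator()
// below hands out these proxies, so accesses read as
//
//     rl::atomic<int> x;
//     x($).store(1, rl::mo_release);        // via atomic_proxy<int>
//     int v = x($).load(rl::mo_acquire);    // via atomic_proxy_const<int>
//     x($)++;                               // fetch_add(1) shorthand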


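// Core simulated atomic. Besides the current value it keeps a bounded
// history of previously stored values (history_[atomic_history_size]); on a
// load the scheduler picks which historical value the loading thread may
// legally observe, which is how stale reads permitted by the memory model
// are reproduced.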
template<typename T, bool strong_init>
class generic_atomic
{
public:
    generic_atomic()
    {
        context& c = ctx();
        RL_VERIFY(false == c.invariant_executing);
        impl_ = c.atomic_ctor(this);
        initialized_ = false;
        value_ = T();
        already_failed_ = false;

        if (val(strong_init))
        {
            unsigned const index = c.threadx_->atomic_init(impl_);
            last_index_ = index;
            initialized_ = true;
            history_[index] = T();
            value_ = T();
        }
    }

    ~generic_atomic()
    {
        context& c = ctx();
        RL_VERIFY(false == c.invariant_executing);
        sign_.check($);
        c.atomic_dtor(impl_);
    }

    T debug_value() const
    {
        sign_.check($);
        return value_;
    }

    RL_INLINE
    T load(memory_order mo, debug_info_param info) const
    {
        RL_VERIFY(mo_release != mo);
        RL_VERIFY(mo_acq_rel != mo);

        switch (mo)
        {
        case mo_relaxed: return load_impl<mo_relaxed, &thread_info_base::atomic_load_relaxed>(info);
        case mo_consume: return load_impl<mo_consume, &thread_info_base::atomic_load_acquire>(info);
        case mo_acquire: return load_impl<mo_acquire, &thread_info_base::atomic_load_acquire>(info);
        case mo_seq_cst: return load_impl<mo_seq_cst, &thread_info_base::atomic_load_seq_cst>(info);
        default: break;
        }

        RL_VERIFY(false);
        return T();
    }

    RL_INLINE
    void store(T v, memory_order mo, debug_info_param info)
    {
        RL_VERIFY(mo_acquire != mo);
        RL_VERIFY(mo_acq_rel != mo);

        switch (mo)
        {
        case mo_relaxed: return store_impl<mo_relaxed, &thread_info_base::atomic_store_relaxed>(v, info);
        case mo_release: return store_impl<mo_release, &thread_info_base::atomic_store_release>(v, info);
        case mo_seq_cst: return store_impl<mo_seq_cst, &thread_info_base::atomic_store_seq_cst>(v, info);
        default: break;
        }

        RL_VERIFY(false);
    }

    RL_INLINE
    bool compare_exchange_weak(T& cmp, T xchg, memory_order mo, debug_info_param info)
    {
        return compare_exchange(bool_t<true>(), cmp, xchg, mo, info);
    }

    RL_INLINE
    bool compare_exchange_strong(T& cmp, T xchg, memory_order mo, debug_info_param info)
    {
        return compare_exchange(bool_t<false>(), cmp, xchg, mo, info);
    }

    RL_INLINE
    bool compare_exchange_weak(T& cmp, T xchg, memory_order mo, debug_info_param info, memory_order failure_mo, debug_info_param)
    {
        return compare_exchange(bool_t<true>(), cmp, xchg, mo, failure_mo, info);
    }

    RL_INLINE
    bool compare_exchange_strong(T& cmp, T xchg, memory_order mo, debug_info_param info, memory_order failure_mo, debug_info_param)
    {
        return compare_exchange(bool_t<false>(), cmp, xchg, mo, failure_mo, info);
    }

    template<bool spurious_failures>
    RL_INLINE
    bool compare_exchange(bool_t<spurious_failures>, T& cmp, T xchg, memory_order mo, debug_info_param info)
    {
        switch (mo)
        {
        case mo_relaxed: return compare_swap_impl<spurious_failures, mo_relaxed, &thread_info_base::atomic_rmw_relaxed, mo_relaxed, &thread_info_base::atomic_load_relaxed_rmw>(cmp, xchg, info);
        case mo_consume: return compare_swap_impl<spurious_failures, mo_consume, &thread_info_base::atomic_rmw_acquire, mo_consume, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);
        case mo_acquire: return compare_swap_impl<spurious_failures, mo_acquire, &thread_info_base::atomic_rmw_acquire, mo_acquire, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);
        case mo_release: return compare_swap_impl<spurious_failures, mo_release, &thread_info_base::atomic_rmw_release, mo_relaxed, &thread_info_base::atomic_load_relaxed_rmw>(cmp, xchg, info);
        case mo_acq_rel: return compare_swap_impl<spurious_failures, mo_acq_rel, &thread_info_base::atomic_rmw_acq_rel, mo_acquire, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);
        case mo_seq_cst: return compare_swap_impl<spurious_failures, mo_seq_cst, &thread_info_base::atomic_rmw_seq_cst, mo_seq_cst, &thread_info_base::atomic_load_seq_cst_rmw>(cmp, xchg, info);
        }

        RL_VERIFY(false);
        return false;
    }
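    // Note: with a single order argument, the failure-path load order above
    // is derived from the success order by dropping its release component
    // (release -> relaxed, acq_rel -> acquire), mirroring the C++11 rule
    // for the one-argument compare_exchange overloads.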

    template<bool spurious_failures>
    RL_INLINE
    bool compare_exchange(bool_t<spurious_failures>, T& cmp, T xchg, memory_order mo, memory_order failure_mo, debug_info_param info)
    {
        switch (mo)
        {
        case mo_relaxed:
            {
                RL_VERIFY(mo_relaxed == failure_mo);
                return compare_swap_impl<spurious_failures, mo_relaxed, &thread_info_base::atomic_rmw_relaxed, mo_relaxed, &thread_info_base::atomic_load_relaxed_rmw>(cmp, xchg, info);
            }
        case mo_consume:
            {
                RL_VERIFY(mo_relaxed == failure_mo || mo_consume == failure_mo);
                switch (failure_mo)
                {
                case mo_relaxed: return compare_swap_impl<spurious_failures, mo_consume, &thread_info_base::atomic_rmw_acquire, mo_relaxed, &thread_info_base::atomic_load_relaxed_rmw>(cmp, xchg, info);
                case mo_consume: return compare_swap_impl<spurious_failures, mo_consume, &thread_info_base::atomic_rmw_acquire, mo_consume, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);
                default: RL_VERIFY(false); return false;
                }
            }
        case mo_acquire:
            {
                RL_VERIFY(mo_relaxed == failure_mo || mo_consume == failure_mo || mo_acquire == failure_mo);
                switch (failure_mo)
                {
                case mo_relaxed: return compare_swap_impl<spurious_failures, mo_acquire, &thread_info_base::atomic_rmw_acquire, mo_relaxed, &thread_info_base::atomic_load_relaxed_rmw>(cmp, xchg, info);
                case mo_consume: return compare_swap_impl<spurious_failures, mo_acquire, &thread_info_base::atomic_rmw_acquire, mo_consume, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);
                case mo_acquire: return compare_swap_impl<spurious_failures, mo_acquire, &thread_info_base::atomic_rmw_acquire, mo_acquire, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);
                default: RL_VERIFY(false); return false;
                }
            }
        case mo_release:
            {
                RL_VERIFY(mo_relaxed == failure_mo);
                return compare_swap_impl<spurious_failures, mo_release, &thread_info_base::atomic_rmw_release, mo_relaxed, &thread_info_base::atomic_load_relaxed_rmw>(cmp, xchg, info);
            }
        case mo_acq_rel:
            {
                RL_VERIFY(mo_relaxed == failure_mo || mo_consume == failure_mo || mo_acquire == failure_mo);
                switch (failure_mo)
                {
                case mo_relaxed: return compare_swap_impl<spurious_failures, mo_acq_rel, &thread_info_base::atomic_rmw_acq_rel, mo_relaxed, &thread_info_base::atomic_load_relaxed_rmw>(cmp, xchg, info);
                case mo_consume: return compare_swap_impl<spurious_failures, mo_acq_rel, &thread_info_base::atomic_rmw_acq_rel, mo_consume, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);
                case mo_acquire: return compare_swap_impl<spurious_failures, mo_acq_rel, &thread_info_base::atomic_rmw_acq_rel, mo_acquire, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);
                default: RL_VERIFY(false); return false;
                }
            }
        case mo_seq_cst:
            {
                RL_VERIFY(mo_relaxed == failure_mo || mo_consume == failure_mo || mo_acquire == failure_mo || mo_seq_cst == failure_mo);
                switch (failure_mo)
                {
                case mo_relaxed: return compare_swap_impl<spurious_failures, mo_seq_cst, &thread_info_base::atomic_rmw_seq_cst, mo_relaxed, &thread_info_base::atomic_load_relaxed_rmw>(cmp, xchg, info);
                case mo_consume: return compare_swap_impl<spurious_failures, mo_seq_cst, &thread_info_base::atomic_rmw_seq_cst, mo_consume, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);
                case mo_acquire: return compare_swap_impl<spurious_failures, mo_seq_cst, &thread_info_base::atomic_rmw_seq_cst, mo_acquire, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);
                case mo_seq_cst: return compare_swap_impl<spurious_failures, mo_seq_cst, &thread_info_base::atomic_rmw_seq_cst, mo_seq_cst, &thread_info_base::atomic_load_seq_cst_rmw>(cmp, xchg, info);
                default: RL_VERIFY(false); return false;
                }
            }
        }

        RL_VERIFY(false);
        return false;
    }
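    // A minimal sketch of the canonical CAS loop as it would be written in
    // a Relacy test against this interface (hypothetical fragment; `x` is
    // an rl::atomic<int>):
    //
    //     int expected = x($).load(rl::mo_relaxed);
    //     while (!x($).compare_exchange_weak(expected, expected + 1,
    //                                        rl::mo_acq_rel, rl::mo_relaxed))
    //     {
    //         // on failure `expected` now holds the current value; retry
    //     }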

    T exchange(T xchg, memory_order mo, debug_info_param info)
    {
        return rmw(rmw_type_t<rmw_type_swap>(), xchg, mo, info);
    }

    T fetch_add(typename atomic_add_type<T>::type value, memory_order mo, debug_info_param info)
    {
        return rmw(rmw_type_t<rmw_type_add>(), value, mo, info);
    }

    T fetch_sub(typename atomic_add_type<T>::type value, memory_order mo, debug_info_param info)
    {
        return rmw(rmw_type_t<rmw_type_sub>(), value, mo, info);
    }

    T fetch_and(T value, memory_order mo, debug_info_param info)
    {
        return rmw(rmw_type_t<rmw_type_and>(), value, mo, info);
    }

    T fetch_or(T value, memory_order mo, debug_info_param info)
    {
        return rmw(rmw_type_t<rmw_type_or>(), value, mo, info);
    }

    T fetch_xor(T value, memory_order mo, debug_info_param info)
    {
        return rmw(rmw_type_t<rmw_type_xor>(), value, mo, info);
    }

    template<typename Y, rmw_type_e type>
    RL_INLINE
    T rmw(rmw_type_t<type>, Y op, memory_order mo, debug_info_param info)
    {
        switch (mo)
        {
        case mo_relaxed: return rmw_impl<Y, mo_relaxed, &thread_info_base::atomic_rmw_relaxed>(rmw_type_t<type>(), op, info);
        case mo_consume: return rmw_impl<Y, mo_consume, &thread_info_base::atomic_rmw_acquire>(rmw_type_t<type>(), op, info);
        case mo_acquire: return rmw_impl<Y, mo_acquire, &thread_info_base::atomic_rmw_acquire>(rmw_type_t<type>(), op, info);
        case mo_release: return rmw_impl<Y, mo_release, &thread_info_base::atomic_rmw_release>(rmw_type_t<type>(), op, info);
        case mo_acq_rel: return rmw_impl<Y, mo_acq_rel, &thread_info_base::atomic_rmw_acq_rel>(rmw_type_t<type>(), op, info);
        case mo_seq_cst: return rmw_impl<Y, mo_seq_cst, &thread_info_base::atomic_rmw_seq_cst>(rmw_type_t<type>(), op, info);
        }

        RL_VERIFY(false);
        return T();
    }

    unpark_reason wait(context& c, bool is_timed, bool allow_spurious_wakeup, debug_info_param info)
    {
        sign_.check(info);
        return c.threadx_->atomic_wait(impl_, is_timed, allow_spurious_wakeup, info);
    }

    thread_id_t wake(context& c, thread_id_t count, debug_info_param info)
    {
        sign_.check(info);
        return c.threadx_->atomic_wake(impl_, count, info);
    }

private:
    T value_;
    T history_ [atomic_history_size];
    atomic_data* impl_;
    unsigned last_index_;
    signature<987654321> sign_;
    bool initialized_;
    bool already_failed_;

    template<memory_order mo, unsigned (thread_info_base::*impl)(atomic_data* RL_RESTRICT data)>
    T load_impl(debug_info_param info) const
    {
        context& c = ctx();
        c.sched();
        sign_.check(info);

        if (false == c.invariant_executing)
        {
            unsigned const index = (c.threadx_->*impl)(impl_);
            if ((unsigned)-1 == index)
            {
                RL_HIST(atomic_load_event<T>) {this, T(), mo, false} RL_HIST_END();
                RL_ASSERT_IMPL(false, test_result_unitialized_access, "", info);
            }
            T const v = history_[index];

            RL_HIST(atomic_load_event<T>) {this, v, mo, last_index_ != index} RL_HIST_END();

            return v;
        }
        else
        {
            if (false == initialized_)
            {
                RL_HIST(atomic_load_event<T>) {this, T(), mo, false} RL_HIST_END();
                RL_ASSERT_IMPL(false, test_result_unitialized_access, "", info);
            }
            return value_;
        }
    }

    template<memory_order mo, unsigned (thread_info_base::*impl)(atomic_data* RL_RESTRICT data)>
    void store_impl(T v, debug_info_param info)
    {
        context& c = ctx();
        RL_VERIFY(false == c.invariant_executing);
        c.sched();
        sign_.check(info);

        unsigned const index = (c.threadx_->*impl)(impl_);

        T const prev = value_;
        last_index_ = index;
        history_[index] = v;
        value_ = v;
        initialized_ = true;
        RL_HIST(atomic_store_event<T>) {this, prev, v, mo} RL_HIST_END();
    }

    template<bool spurious_failures, memory_order mo, unsigned (thread_info_base::*impl)(atomic_data* RL_RESTRICT data, bool&), memory_order failure_mo, unsigned (thread_info_base::*failure_impl)(atomic_data* RL_RESTRICT data)>
    bool compare_swap_impl(T& cmp, T xchg, debug_info_param info)
    {
        context& c = ctx();
        RL_VERIFY(false == c.invariant_executing);
        c.sched();
        sign_.check(info);

        if (false == initialized_)
        {
            RL_HIST(atomic_load_event<T>) {this, T(), mo, false} RL_HIST_END();
            RL_ASSERT_IMPL(false, test_result_unitialized_access, "", info);
        }

        bool success = false;
        bool spurious_failure = false;
        bool aba = false;

        T const cmpv = cmp;
        T const current = value_;
        if (current == cmpv)
        {
            if (val(spurious_failures))
            {
                if (c.is_random_sched())
                {
                    spurious_failure = (0 == c.rand(4, sched_type_cas_fail));
                }
                else
                {
                    if (false == already_failed_)
                    {
                        spurious_failure = 0 == c.rand(2, sched_type_cas_fail);
                        if (spurious_failure)
                            already_failed_ = true;
                    }
                }
            }

            if (false == spurious_failure)
            {
                success = true;
                unsigned const index = (c.threadx_->*impl)(impl_, aba);
                value_ = xchg;
                last_index_ = index;
                history_[index] = xchg;
            }
        }

        if (false == success)
        {
            (c.threadx_->*failure_impl)(impl_);
            cmp = current;
        }

        RL_HIST(atomic_cas_event<T>) {RL_INFO, this, current, cmpv, xchg, mo, success, spurious_failure, aba} RL_HIST_END();

        return success;
    }
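    // Spurious failure of weak CAS is modeled above in two regimes: under
    // the random scheduler an eligible CAS fails spuriously with
    // probability 1/4, while under systematic exploration both outcomes are
    // branched on only for the first eligible CAS on this variable
    // (already_failed_), which keeps the explored state space bounded.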

    template<typename Y, memory_order mo, unsigned (thread_info_base::*impl)(atomic_data* RL_RESTRICT data, bool&), rmw_type_e type>
    T rmw_impl(rmw_type_t<type>, Y op, debug_info_param info)
    {
        context& c = ctx();
        RL_VERIFY(false == c.invariant_executing);
        c.sched();
        sign_.check(info);

        if (false == initialized_)
        {
            RL_HIST(atomic_load_event<T>) {this, T(), mo, false} RL_HIST_END();
            RL_ASSERT_IMPL(false, test_result_unitialized_access, "", info);
        }

        bool aba;
        unsigned const index = (c.threadx_->*impl)(impl_, aba);

        T const prev_value = value_;
        T const new_value = perform_rmw(rmw_type_t<type>(), prev_value, op);
        value_ = new_value;
        last_index_ = index;
        history_[index] = new_value;

        typedef atomic_rmw_event<T, Y> atomic_rmw_event_t;
        RL_HIST(atomic_rmw_event_t) {RL_INFO, this, prev_value, op, new_value, mo, type} RL_HIST_END();

        return prev_value;
    }

    RL_NOCOPY(generic_atomic);
};





template<typename T>
class atomic : public generic_atomic<T, false>
{
public:
    atomic()
    {
    }

    /*explicit*/ atomic(T value)
    {
        this->store(value, mo_relaxed, $);
    }

    atomic_proxy_const<T> operator () (debug_info_param info) const /*volatile*/
    {
        return atomic_proxy_const<T>(*this, info);
    }

    atomic_proxy<T> operator () (debug_info_param info) /*volatile*/
    {
        return atomic_proxy<T>(*this, info);
    }

    bool is_lock_free() const /*volatile*/
    {
        return true;
    }

    friend class atomic_proxy<T>;
    friend class atomic_proxy_const<T>;

    RL_NOCOPY(atomic);
};
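// A minimal sketch of how atomic<> is typically driven from a Relacy test
// (assuming the rl::test_suite / rl::simulate driver and rl::var<> from the
// rest of the library; publish_test is a hypothetical example name):
//
//     struct publish_test : rl::test_suite<publish_test, 2>
//     {
//         rl::atomic<int> flag;
//         rl::var<int> data;
//
//         void before()
//         {
//             flag($) = 0;
//             data($) = 0;
//         }
//
//         void thread(unsigned index)
//         {
//             if (0 == index)
//             {
//                 data($) = 42;
//                 flag($).store(1, rl::mo_release);
//             }
//             else if (1 == flag($).load(rl::mo_acquire))
//             {
//                 RL_ASSERT(42 == data($));
//             }
//         }
//     };
//
//     int main()
//     {
//         rl::simulate<publish_test>();
//     }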




typedef atomic<bool> atomic_bool;
typedef atomic<void*> atomic_address;

typedef atomic<char> atomic_char;
typedef atomic<signed char> atomic_schar;
typedef atomic<unsigned char> atomic_uchar;
typedef atomic<short> atomic_short;
typedef atomic<unsigned short> atomic_ushort;
typedef atomic<int> atomic_int;
typedef atomic<unsigned int> atomic_uint;
typedef atomic<long> atomic_long;
typedef atomic<unsigned long> atomic_ulong;
typedef atomic<long long> atomic_llong;
typedef atomic<unsigned long long> atomic_ullong;
//typedef atomic<char16_t> atomic_char16_t;
//typedef atomic<char32_t> atomic_char32_t;
typedef atomic<wchar_t> atomic_wchar_t;

//typedef atomic<int_least8_t> atomic_int_least8_t;
//typedef atomic<uint_least8_t> atomic_uint_least8_t;
//typedef atomic<int_least16_t> atomic_int_least16_t;
//typedef atomic<uint_least16_t> atomic_uint_least16_t;
//typedef atomic<int_least32_t> atomic_int_least32_t;
//typedef atomic<uint_least32_t> atomic_uint_least32_t;
//typedef atomic<int_least64_t> atomic_int_least64_t;
//typedef atomic<uint_least64_t> atomic_uint_least64_t;
//typedef atomic<int_fast8_t> atomic_int_fast8_t;
//typedef atomic<uint_fast8_t> atomic_uint_fast8_t;
//typedef atomic<int_fast16_t> atomic_int_fast16_t;
//typedef atomic<uint_fast16_t> atomic_uint_fast16_t;
//typedef atomic<int_fast32_t> atomic_int_fast32_t;
//typedef atomic<uint_fast32_t> atomic_uint_fast32_t;
//typedef atomic<int_fast64_t> atomic_int_fast64_t;
//typedef atomic<uint_fast64_t> atomic_uint_fast64_t;
typedef atomic<intptr_t> atomic_intptr_t;
typedef atomic<uintptr_t> atomic_uintptr_t;
typedef atomic<size_t> atomic_size_t;
//typedef atomic<ssize_t> atomic_ssize_t;
typedef atomic<ptrdiff_t> atomic_ptrdiff_t;
//typedef atomic<intmax_t> atomic_intmax_t;
//typedef atomic<uintmax_t> atomic_uintmax_t;



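// Per-atomic bookkeeping used by the model checker: each stored value
// occupies a history_record carrying per-thread timestamps
// (acq_rel_order_, last_seen_order_) in the style of vector clocks, from
// which the scheduler decides which records a given thread is still
// allowed to observe; futex_ws_/futex_sync_ support atomic wait/wake.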
template<thread_id_t thread_count>
struct atomic_data_impl : atomic_data
{
    typedef thread_info<thread_count> thread_info_t;

    struct history_record
    {
        timestamp_t acq_rel_order_ [thread_count];
        timestamp_t last_seen_order_ [thread_count];

        bool busy_;
        bool seq_cst_;
        thread_id_t thread_id_;
        timestamp_t acq_rel_timestamp_;
    };

    static size_t const history_size = atomic_history_size;
    aligned<history_record> history_ [history_size];
    unsigned current_index_;
    waitset<thread_count> futex_ws_;
    sync_var<thread_count> futex_sync_;

    atomic_data_impl()
    {
        current_index_ = 0;
        history_record& rec = history_[0];
        history_[atomic_history_size - 1].busy_ = false;

        rec.busy_ = false;
        rec.seq_cst_ = false;
        rec.thread_id_ = (thread_id_t)-1;
    }

    atomic_data_impl(thread_info_t& th)
    {
        current_index_ = 0;
        history_[atomic_history_size - 1].busy_ = false;

        history_record& rec = history_[0];
        rec.busy_ = true;
        rec.seq_cst_ = false;
        rec.thread_id_ = th.index_;

        th.own_acq_rel_order_ += 1;
        rec.acq_rel_timestamp_ = th.own_acq_rel_order_;

        foreach<thread_count>(rec.acq_rel_order_, assign_zero);
        foreach<thread_count>(rec.last_seen_order_, assign<(timestamp_t)-1>);
        rec.last_seen_order_[th.index_] = th.own_acq_rel_order_;
    }
};


}



#endif