medfall

A super great game engine

thread_base.hpp (3261B)


/*  Relacy Race Detector
 *  Copyright (c) 2008-2013, Dmitry S. Vyukov
 *  All rights reserved.
 *  This software is provided AS-IS with no warranty, either express or implied.
 *  This software is distributed under a license and may not be copied,
 *  modified or distributed except as expressly authorized under the
 *  terms of the license contained in the file LICENSE in this distribution.
 */

#ifndef RL_THREAD_BASE_HPP
#define RL_THREAD_BASE_HPP
#ifdef _MSC_VER
#   pragma once
#endif

#include "base.hpp"
#include "context_base.hpp"
//#include "test_suite.hpp"
//#include "memory_order.hpp"
//#include "foreach.hpp"


namespace rl
{



struct atomic_data;
struct var_data;
template<thread_id_t thread_count> struct atomic_data_impl;
template<thread_id_t thread_count> struct var_data_impl;


class thread_info_base
{
public:
    virtual void on_start() = 0;
    virtual void on_finish() = 0;

    virtual unsigned atomic_init(atomic_data* RL_RESTRICT data) = 0;

    virtual unsigned atomic_load_relaxed(atomic_data* RL_RESTRICT data) = 0;
    virtual unsigned atomic_load_acquire(atomic_data* RL_RESTRICT data) = 0;
    virtual unsigned atomic_load_seq_cst(atomic_data* RL_RESTRICT data) = 0;
    virtual unsigned atomic_load_relaxed_rmw(atomic_data* RL_RESTRICT data) = 0;
    virtual unsigned atomic_load_acquire_rmw(atomic_data* RL_RESTRICT data) = 0;
    virtual unsigned atomic_load_seq_cst_rmw(atomic_data* RL_RESTRICT data) = 0;

    virtual unsigned atomic_store_relaxed(atomic_data* RL_RESTRICT data) = 0;
    virtual unsigned atomic_store_release(atomic_data* RL_RESTRICT data) = 0;
    virtual unsigned atomic_store_seq_cst(atomic_data* RL_RESTRICT data) = 0;

    virtual unsigned atomic_rmw_relaxed(atomic_data* RL_RESTRICT data, bool& aba) = 0;
    virtual unsigned atomic_rmw_acquire(atomic_data* RL_RESTRICT data, bool& aba) = 0;
    virtual unsigned atomic_rmw_release(atomic_data* RL_RESTRICT data, bool& aba) = 0;
    virtual unsigned atomic_rmw_acq_rel(atomic_data* RL_RESTRICT data, bool& aba) = 0;
    virtual unsigned atomic_rmw_seq_cst(atomic_data* RL_RESTRICT data, bool& aba) = 0;

    virtual unpark_reason atomic_wait(atomic_data* RL_RESTRICT data, bool is_timed, bool allow_spurious_wakeup, debug_info_param info) = 0;
    virtual thread_id_t atomic_wake(atomic_data* RL_RESTRICT data, thread_id_t count, debug_info_param info) = 0;

    virtual ~thread_info_base() {} // just to calm down gcc

    fiber_t fiber_;
    thread_id_t const index_;
    context* ctx_;
    timestamp_t* const acq_rel_order_;
    timestamp_t last_yield_;
    timestamp_t& own_acq_rel_order_;
    unpark_reason unpark_reason_;
    thread_id_t temp_switch_from_;
    int saved_disable_preemption_;
    int errno_;
    void* (*dynamic_thread_func_)(void*);
    void* dynamic_thread_param_;
    //unsigned disable_history_;

    thread_info_base(thread_id_t index, timestamp_t* acq_rel_order)
        : index_(index)
        , acq_rel_order_(acq_rel_order)
        , own_acq_rel_order_(acq_rel_order[index])
    {
    }

private:
    thread_info_base(thread_info_base const&);
    thread_info_base& operator = (thread_info_base const&);
};




}

#endif
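
thread_info_base is internal to the race detector: it is the per-thread interface the simulator drives while a test runs, and user code does not implement it directly. For orientation, a Relacy test is normally written against the library's public front end instead. The sketch below is illustrative only and assumes the usual front end from the rest of the distribution (relacy_std.hpp, rl::test_suite, rl::var, rl::simulate and the $ debug-info macro), none of which is defined in this header.

#include <relacy/relacy_std.hpp>

// Two threads communicate through a relaxed atomic flag; Relacy explores the
// possible interleavings and flags the unsynchronized access to x.
struct race_test : rl::test_suite<race_test, 2>
{
    std::atomic<int> a;   // std::atomic is remapped by relacy_std.hpp
    rl::var<int>     x;   // checked non-atomic variable

    void before()
    {
        a($) = 0;
        x($) = 0;
    }

    void thread(unsigned thread_index)
    {
        if (0 == thread_index)
        {
            x($) = 1;
            a($).store(1, rl::mo_relaxed);
        }
        else if (1 == a($).load(rl::mo_relaxed))
        {
            x($) = 2;
        }
    }
};

int main()
{
    rl::simulate<race_test>();
}

During rl::simulate, each atomic access made through $ is presumably routed to one of the per-memory-order virtuals declared above (atomic_load_relaxed, atomic_store_release, atomic_rmw_seq_cst, and so on) on the current thread's thread_info_base.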