medfall


commit 12a61765a9263b495e5a86fd88270d6f3b12333d
parent b3aa6be5d4b600e4c7d35cdf230bdb1f0dfdb254
Author: Michael Savage <mikejsavage@gmail.com>
Date:   Sat Aug 20 20:48:50 +0100

Add relacy tests for the SPSC queue

Diffstat:
Makefile | 5++++-
libs/relacy/CHANGES.TXT | 25+++++++++++++++++++++++++
libs/relacy/LICENSE-BSD.TXT | 16++++++++++++++++
libs/relacy/LICENSE-GPL.TXT | 674+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/LICENSE.TXT | 5+++++
libs/relacy/relacy/atomic.hpp | 723+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/atomic_events.hpp | 148+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/atomic_fence.hpp | 83+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/backoff.hpp | 57+++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/base.hpp | 144+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/cli.hpp | 52++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/cli_interlocked.hpp | 67+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/cli_var.hpp | 158+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/cli_volatile.hpp | 161+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/context.hpp | 1291+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/context_addr_hash.hpp | 81+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/context_base.hpp | 322+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/context_base_impl.hpp | 72++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/context_bound_scheduler.hpp | 168+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/defs.hpp | 144+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/dyn_thread.hpp | 53+++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/dyn_thread_ctx.hpp | 127+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/foreach.hpp | 133+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/full_search_scheduler.hpp | 421+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/history.hpp | 205+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/java.hpp | 301+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/java_atomic.hpp | 155+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/java_var.hpp | 157+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/java_volatile.hpp | 158+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/memory.hpp | 241+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/memory_order.hpp | 54++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/pch.hpp | 76++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/platform.hpp | 257+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/pthread.h | 21+++++++++++++++++++++
libs/relacy/relacy/random.hpp | 55+++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/random_scheduler.hpp | 141+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/relacy.hpp | 73+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/relacy_cli.hpp | 29+++++++++++++++++++++++++++++
libs/relacy/relacy/relacy_java.hpp | 29+++++++++++++++++++++++++++++
libs/relacy/relacy/relacy_std.hpp | 82+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/rmw.hpp | 101+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/scheduler.hpp | 332+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/signature.hpp | 84+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/slab_allocator.hpp | 157+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/stdlib/condition_variable.hpp | 372+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/stdlib/event.hpp | 386+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/stdlib/mutex.hpp | 674+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/stdlib/pthread.hpp | 588+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/stdlib/semaphore.hpp | 558+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/stdlib/windows.hpp | 617+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/sync_var.hpp | 66++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/test_params.hpp | 90+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/test_result.hpp | 111+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/test_suite.hpp | 48++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/thread.hpp | 415+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/thread_base.hpp | 95+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/thread_local.hpp | 192+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/thread_local_ctx.hpp | 122+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/var.hpp | 388+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/volatile.hpp | 24++++++++++++++++++++++++
libs/relacy/relacy/waitset.hpp | 198+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
libs/relacy/relacy/windows.h | 21+++++++++++++++++++++
relacy.cc | 56++++++++++++++++++++++++++++++++++++++++++++++++++++++++
63 files changed, 12858 insertions(+), 1 deletion(-)
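
The new test_lockfree target below builds relacy.cc against the two queue headers. The body of relacy.cc is not shown in this section, so the following is only a sketch of how a Relacy driver for an SPSC queue is typically structured; the queue layout, names, and push/pop logic are illustrative assumptions, not the repository's actual code. Relacy tests derive from rl::test_suite, route every shared access through the $ proxy so the scheduler can interleave and record it, and initialize each atomic before use (an uninitialized access is itself reported as an error, as the vendored atomic.hpp below shows).

    // Hypothetical driver in the spirit of relacy.cc -- not the real file.
    #include <relacy/relacy.hpp>

    struct spsc_test : rl::test_suite< spsc_test, 2 >
    {
        rl::atomic< unsigned > head;   // consumer cursor
        rl::atomic< unsigned > tail;   // producer cursor
        rl::var< int > slots[ 4 ];     // plain data, checked for races

        void before()
        {
            head($).store( 0, rl::mo_relaxed );
            tail($).store( 0, rl::mo_relaxed );
        }

        void thread( unsigned thread_index )
        {
            if( thread_index == 0 )
            {
                // producer: fill the slot, then publish it with a release store
                unsigned t = tail($).load( rl::mo_relaxed );
                slots[ t % 4 ]($) = 42;
                tail($).store( t + 1, rl::mo_release );
            }
            else
            {
                // consumer: acquire-load the publish cursor before touching the slot
                unsigned h = head($).load( rl::mo_relaxed );
                if( tail($).load( rl::mo_acquire ) != h )
                {
                    int v = slots[ h % 4 ]($);
                    RL_ASSERT( v == 42 );
                    head($).store( h + 1, rl::mo_release );
                }
            }
        }
    };

    int main()
    {
        rl::simulate< spsc_test >();
        return 0;
    }

If the release store to tail were weakened to mo_relaxed, Relacy would flag the consumer's read of slots as a data race instead of letting it pass silently.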
diff --git a/Makefile b/Makefile
@@ -2,7 +2,7 @@ BINS := medfall pp sound
 MODULES := bsp.so btt.so hm.so sm.so audio.so
 STBS := truetype image image_write perlin
 
-all: opengl33.cc $(BINS) $(MODULES)
+all: opengl33.cc $(BINS) $(MODULES) test_lockfree
 
 # Binary dependencies
 medfall: main.o gl.o log.o memory_arena.o opengl33.o
@@ -16,6 +16,9 @@ btt.so: btt.o heightmap.o gpubtt.o skybox.o
 audio.so: test_audio.o audio.o wave.o linux_audio.o
 sm.so: shadow_map.o
 
+test_lockfree: relacy.cc nonblocking_fixed_spmc_queue.h nonblocking_fixed_spsc_queue.h
+	$(CXX) relacy.cc -o test_lockfree $(CXXFLAGS) $(LDFLAGS) -std=c++98 -fexceptions -frtti -Wno-missing-field-initializers
+
 # Common dependencies
 COMMON_OBJS := log.o memory_arena.o work_queue.o immediate.o benchmark.o stb_truetype.o stb_image.o opengl33.o glsl.o
diff --git a/libs/relacy/CHANGES.TXT b/libs/relacy/CHANGES.TXT
@@ -0,0 +1,25 @@
+Version 2.4
+Features:
++ Support for futex(FUTEX_WAIT/FUTEX_WAKE)
++ Linux/Darwin performance improved (2.5x for Linux, 7x for Darwin)
+Fixes:
++ Fixed a bunch of issues with WaitForMultipleObjects()/SignalObjectAndWait()
++ Fixed rare spurious memory leak reports related to test progress reporting
+
+Version 2.3
+Features:
++ Support for FlushProcessWriteBuffers()
+
+Version 2.2
+Features:
++ Support for pthread_mutex_timedlock()
++ Support for ETIMEDOUT, EINTR in pthread_cond_timedwait()/pthread_cond_wait()
++ rl::hash_ptr(p, sz) function which provides deterministic hashing of pointers
+Fixes:
++ Win32 mutex is now recursive
++ Compilation issue on MSVC x64 when RL_DEBUGBREAK_ON_ASSERT/RL_DEBUGBREAK_ON_FAILURE defined
++ Fixed OOM crash when execution history is very large
++ Fixed rare crash during iteration count estimation in context bound scheduler
++ Fixed bug in pthread_rwlock/SRWLOCK that at most 2 readers may acquire it simultaneously
++ Fixed bug regarding false race detection when simulation runs for very long time (int overflow)
+
diff --git a/libs/relacy/LICENSE-BSD.TXT b/libs/relacy/LICENSE-BSD.TXT
@@ -0,0 +1,16 @@
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+ - Redistributions of source code must retain the above copyright notice,
+   this list of conditions and the following disclaimer.
+ - Redistributions in binary form must reproduce the above copyright notice, this list of conditions
+   and the following disclaimer in the documentation and/or other materials provided with the distribution.
+ - The name of the owner may not be used to endorse or promote products derived from this software
+   without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE OWNER "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE OWNER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
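The CHANGES.TXT above mentions both a random scheduler and a context bound scheduler (plus a fix to its iteration count estimation), and the diffstat vendors random_scheduler.hpp, context_bound_scheduler.hpp, and full_search_scheduler.hpp to match. A driver can select a scheduler through rl::test_params instead of the default rl::simulate call used in the sketch above. The field and constant names below follow the vendored test_params.hpp as I read it; treat them as assumptions rather than verified against this exact Relacy version.

    // Hypothetical: exhaustive search with bounded preemptions, then a long
    // randomized run. 'spsc_test' is the sketch from earlier.
    void run_simulations()
    {
        rl::test_params bounded;
        bounded.search_type = rl::fair_context_bound_scheduler_type;
        bounded.context_bound = 3;              // max preemptions per execution
        rl::simulate< spsc_test >( bounded );   // systematic: visits every interleaving

        rl::test_params randomized;
        randomized.search_type = rl::random_scheduler_type;
        randomized.iteration_count = 1000000;   // probabilistic coverage
        rl::simulate< spsc_test >( randomized );
    }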
diff --git a/libs/relacy/LICENSE-GPL.TXT b/libs/relacy/LICENSE-GPL.TXT
@@ -0,0 +1,674 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ [remaining 667 lines: the verbatim text of the GNU General Public License, version 3 — preamble, Terms and Conditions sections 0-17, and the "How to Apply These Terms to Your New Programs" appendix]
diff --git a/libs/relacy/LICENSE.TXT b/libs/relacy/LICENSE.TXT
@@ -0,0 +1,5 @@
+Relacy Race Detector
+Copyright (c) 2008-2009, Dmitry S. Vyukov
+All rights reserved.
+This software is dual licensed under the BSD and GPLv3 licenses.
+See the files LICENSE-BSD.TXT and LICENSE-GPL.TXT for details.
diff --git a/libs/relacy/relacy/atomic.hpp b/libs/relacy/relacy/atomic.hpp
@@ -0,0 +1,723 @@
+/* Relacy Race Detector
+ * Copyright (c) 2008-2010, Dmitry S. Vyukov
+ * All rights reserved.
+ * This software is provided AS-IS with no warranty, either express or implied.
+ * This software is distributed under a license and may not be copied,
+ * modified or distributed except as expressly authorized under the
+ * terms of the license contained in the file LICENSE.TXT in this distribution.
+ */
+
+#ifndef RL_ATOMIC_HPP
+#define RL_ATOMIC_HPP
+#ifdef _MSC_VER
+#   pragma once
+#endif
+
+#include "base.hpp"
+#include "context.hpp"
+#include "memory_order.hpp"
+#include "signature.hpp"
+#include "atomic_events.hpp"
+#include "waitset.hpp"
+#include "rmw.hpp"
+
+
+namespace rl
+{
+
+
+template<typename T>
+class atomic;
+
+
+template<bool> struct bool_t {};
+
+
+
+template<typename T>
+class atomic_proxy_const
+{
+public:
+    atomic_proxy_const(atomic<T> const /*volatile*/& var, debug_info_param info)
+        : var_(const_cast<atomic<T>&>(var))
+        , info_(info)
+    {
+    }
+
+    T load(memory_order mo = mo_seq_cst) const
+    {
+        return var_.load(mo, info_);
+    }
+
+    operator T () const
+    {
+        return load();
+    }
+
+protected:
+    atomic<T>& var_;
+    debug_info info_;
+
+    atomic_proxy_const& operator = (atomic_proxy_const const&);
+};
+
+
+
+
+template<typename T>
+class atomic_proxy : public atomic_proxy_const<T>
+{
+public:
+    typedef typename atomic_add_type<T>::type add_type;
+
+    atomic_proxy(atomic<T> /*volatile*/& var, debug_info_param info)
+        : atomic_proxy_const<T>(var, info)
+    {
+    }
+
+    void store(T value, memory_order mo = mo_seq_cst)
+    {
+        this->var_.store(value, mo, this->info_);
+    }
+
+    bool compare_exchange_weak(T& cmp, T xchg, memory_order mo = mo_seq_cst)
+    {
+        return this->var_.compare_exchange(bool_t<true>(), cmp, xchg, mo, this->info_);
+    }
+
+    bool compare_exchange_weak(T& cmp, T xchg, memory_order mo, memory_order failure_mo)
+    {
+        return this->var_.compare_exchange(bool_t<true>(), cmp, xchg, mo, failure_mo, this->info_);
+    }
+
+    bool compare_exchange_strong(T& cmp, T xchg, memory_order mo = mo_seq_cst)
+    {
+        return this->var_.compare_exchange(bool_t<false>(), cmp, xchg, mo, this->info_);
+    }
+
+    bool compare_exchange_strong(T& cmp, T xchg, memory_order mo, memory_order failure_mo)
+    {
+        return this->var_.compare_exchange(bool_t<false>(), cmp, xchg, mo, failure_mo, this->info_);
+    }
+
+    T exchange(T xchg, memory_order mo = mo_seq_cst)
+    {
+        return this->var_.rmw(rmw_type_t<rmw_type_swap>(), xchg, mo, this->info_);
+    }
+
+    T fetch_add(add_type value, memory_order mo = mo_seq_cst)
+    {
+        return this->var_.rmw(rmw_type_t<rmw_type_add>(), value, mo, this->info_);
+    }
+
+    T fetch_sub(add_type value, memory_order mo = mo_seq_cst)
+    {
+        return this->var_.rmw(rmw_type_t<rmw_type_sub>(), value, mo, this->info_);
+    }
+
+    T fetch_and(T value, memory_order mo = mo_seq_cst)
+    {
+        return this->var_.rmw(rmw_type_t<rmw_type_and>(), value, mo, this->info_);
+    }
+
+    T fetch_or(T value, memory_order mo = mo_seq_cst)
+    {
+        return this->var_.rmw(rmw_type_t<rmw_type_or>(), value, mo, this->info_);
+    }
+
+    T fetch_xor(T value, memory_order mo = mo_seq_cst)
+    {
+        return this->var_.rmw(rmw_type_t<rmw_type_xor>(), value, mo, this->info_);
+    }
+
+    T operator = (T value)
+    {
+        store(value);
+        return value;
+    }
+
+    T operator ++ (int)
+    {
+        return fetch_add(1);
+    }
+
+    T operator -- (int)
+    {
+        return fetch_sub(1);
+    }
+
+    T operator ++ ()
+    {
+        return fetch_add(1) + 1;
+    }
+
+    T operator -- ()
+    {
+        return fetch_sub(1) - 1;
+    }
+
+    T operator += (add_type value)
+    {
+        return fetch_add(value) + value;
+    }
+
+    T operator -= (add_type value)
+    {
+        return fetch_sub(value) - value;
+    }
+
+    T operator &= (T value)
+    {
+        return fetch_and(value) & value;
+    }
+
+    T operator |= (T value)
+    {
+        return fetch_or(value) | value;
+    }
+
+    T operator ^= (T value)
+    {
+        return fetch_xor(value) ^ value;
+    }
+};
+
+
+
+
+template<typename T, bool strong_init>
+class generic_atomic
+{
+public:
+    generic_atomic()
+    {
+        context& c = ctx();
+        RL_VERIFY(false == c.invariant_executing);
+        impl_ = c.atomic_ctor(this);
+        initialized_ = false;
+        value_ = T();
+        already_failed_ = false;
+
+        if (val(strong_init))
+        {
+            unsigned const index = c.threadx_->atomic_init(impl_);
+            last_index_ = index;
+            initialized_ = true;
+            history_[index] = T();
+            value_ = T();
+        }
+    }
+
+    ~generic_atomic()
+    {
+        context& c = ctx();
+        RL_VERIFY(false == c.invariant_executing);
+        sign_.check($);
+        c.atomic_dtor(impl_);
+    }
+
+    T debug_value() const
+    {
+        sign_.check($);
+        return value_;
+    }
+
+    RL_INLINE
+    T load(memory_order mo, debug_info_param info) const
+    {
+        RL_VERIFY(mo_release != mo);
+        RL_VERIFY(mo_acq_rel != mo);
+
+        switch (mo)
+        {
+        case mo_relaxed: return load_impl<mo_relaxed, &thread_info_base::atomic_load_relaxed>(info);
+        case mo_consume: return load_impl<mo_consume, &thread_info_base::atomic_load_acquire>(info);
+        case mo_acquire: return load_impl<mo_acquire, &thread_info_base::atomic_load_acquire>(info);
+        case mo_seq_cst: return load_impl<mo_seq_cst, &thread_info_base::atomic_load_seq_cst>(info);
+        default: break;
+        }
+
+        RL_VERIFY(false);
+        return T();
+    }
+
+    RL_INLINE
+    void store(T v, memory_order mo, debug_info_param info)
+    {
+        RL_VERIFY(mo_acquire != mo);
+        RL_VERIFY(mo_acq_rel != mo);
+
+        switch (mo)
+        {
+        case mo_relaxed: return store_impl<mo_relaxed, &thread_info_base::atomic_store_relaxed>(v, info);
+        case mo_release: return store_impl<mo_release, &thread_info_base::atomic_store_release>(v, info);
+        case mo_seq_cst: return store_impl<mo_seq_cst, &thread_info_base::atomic_store_seq_cst>(v, info);
+        default: break;
+        }
+
+        RL_VERIFY(false);
+    }
+
+    RL_INLINE
+    bool compare_exchange_weak(T& cmp, T xchg, memory_order mo, debug_info_param info)
+    {
+        return compare_exchange(bool_t<true>(), cmp, xchg, mo, info);
+    }
+
+    RL_INLINE
+    bool compare_exchange_strong(T& cmp, T xchg, memory_order mo, debug_info_param info)
+    {
+        return compare_exchange(bool_t<false>(), cmp, xchg, mo, info);
+    }
+
+    RL_INLINE
+    bool compare_exchange_weak(T& cmp, T xchg, memory_order mo, debug_info_param info, memory_order failure_mo, debug_info_param)
+    {
+        return compare_exchange(bool_t<true>(), cmp, xchg, mo, failure_mo, info);
+    }
+
+    RL_INLINE
+    bool compare_exchange_strong(T& cmp, T xchg, memory_order mo, debug_info_param info, memory_order failure_mo, debug_info_param)
+    {
+        return compare_exchange(bool_t<false>(), cmp, xchg, mo, failure_mo, info);
+    }
+
+    template<bool spurious_failures>
+    RL_INLINE
+    bool compare_exchange(bool_t<spurious_failures>, T& cmp, T xchg, memory_order mo, debug_info_param info)
+    {
+        switch (mo)
+        {
+        case mo_relaxed: return compare_swap_impl<spurious_failures, mo_relaxed, &thread_info_base::atomic_rmw_relaxed, mo_relaxed, &thread_info_base::atomic_load_relaxed_rmw>(cmp, xchg, info);
+        case mo_consume: return compare_swap_impl<spurious_failures, mo_consume, &thread_info_base::atomic_rmw_acquire, mo_consume, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);
+        case mo_acquire: return compare_swap_impl<spurious_failures, mo_acquire, &thread_info_base::atomic_rmw_acquire, mo_acquire, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);
+        case mo_release: return compare_swap_impl<spurious_failures, mo_release, &thread_info_base::atomic_rmw_release, mo_relaxed, &thread_info_base::atomic_load_relaxed_rmw>(cmp, xchg, info);
+        case mo_acq_rel: return compare_swap_impl<spurious_failures, mo_acq_rel, &thread_info_base::atomic_rmw_acq_rel, mo_acquire, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);
+        case mo_seq_cst: return compare_swap_impl<spurious_failures, mo_seq_cst, &thread_info_base::atomic_rmw_seq_cst, mo_seq_cst, &thread_info_base::atomic_load_seq_cst_rmw>(cmp, xchg, info);
+        }
+
+        RL_VERIFY(false);
+        return false;
+    }
+
+    template<bool spurious_failures>
+    RL_INLINE
+    bool compare_exchange(bool_t<spurious_failures>, T& cmp, T xchg, memory_order mo, memory_order failure_mo, debug_info_param info)
+    {
+        switch (mo)
+        {
+        case mo_relaxed:
+        {
+            RL_VERIFY(mo_relaxed == failure_mo);
+            return compare_swap_impl<spurious_failures, mo_relaxed, &thread_info_base::atomic_rmw_relaxed, mo_relaxed, &thread_info_base::atomic_load_relaxed_rmw>(cmp, xchg, info);
+        }
+        case mo_consume:
+        {
+            RL_VERIFY(mo_relaxed == failure_mo || mo_consume == failure_mo);
+            switch (failure_mo)
+            {
+            case mo_relaxed: return compare_swap_impl<spurious_failures, mo_consume, &thread_info_base::atomic_rmw_acquire, mo_relaxed, &thread_info_base::atomic_load_relaxed_rmw>(cmp, xchg, info);
+            case mo_consume: return compare_swap_impl<spurious_failures, mo_consume, &thread_info_base::atomic_rmw_acquire, mo_consume, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);
+            default: RL_VERIFY(false); return false;
+            }
+        }
+        case mo_acquire:
+        {
+            RL_VERIFY(mo_relaxed == failure_mo || mo_consume == failure_mo || mo_acquire == failure_mo);
+            switch (failure_mo)
+            {
+            case mo_relaxed: return compare_swap_impl<spurious_failures, mo_acquire, &thread_info_base::atomic_rmw_acquire, mo_relaxed, &thread_info_base::atomic_load_relaxed_rmw>(cmp, xchg, info);
+            case mo_consume: return compare_swap_impl<spurious_failures, mo_acquire, &thread_info_base::atomic_rmw_acquire, mo_consume, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);
+            case mo_acquire: return compare_swap_impl<spurious_failures, mo_acquire, &thread_info_base::atomic_rmw_acquire, mo_acquire, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);
+            default: RL_VERIFY(false); return false;
+            }
+        }
+        case mo_release:
+        {
+            RL_VERIFY(mo_relaxed == failure_mo);
+            return compare_swap_impl<spurious_failures, mo_release, &thread_info_base::atomic_rmw_release, mo_relaxed, &thread_info_base::atomic_load_relaxed_rmw>(cmp, xchg, info);
+        }
+        case mo_acq_rel:
+        {
+            RL_VERIFY(mo_relaxed == failure_mo || mo_consume == failure_mo || mo_acquire == failure_mo);
+            switch (failure_mo)
+            {
+            case mo_relaxed: return compare_swap_impl<spurious_failures, mo_acq_rel, &thread_info_base::atomic_rmw_acq_rel, mo_relaxed, &thread_info_base::atomic_load_relaxed_rmw>(cmp, xchg, info);
+            case mo_consume: return compare_swap_impl<spurious_failures, mo_acq_rel, &thread_info_base::atomic_rmw_acq_rel, mo_consume, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);
+            case mo_acquire: return compare_swap_impl<spurious_failures, mo_acq_rel, &thread_info_base::atomic_rmw_acq_rel, mo_acquire, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);
+            default: RL_VERIFY(false); return false;
+            }
+        }
+        case mo_seq_cst:
+        {
+            RL_VERIFY(mo_relaxed == failure_mo || mo_consume == failure_mo || mo_acquire == failure_mo || mo_seq_cst == failure_mo);
+            switch (failure_mo)
+            {
+            case mo_relaxed: return compare_swap_impl<spurious_failures, mo_seq_cst, &thread_info_base::atomic_rmw_seq_cst, mo_relaxed, &thread_info_base::atomic_load_relaxed_rmw>(cmp, xchg, info);
+            case mo_consume: return compare_swap_impl<spurious_failures, mo_seq_cst, &thread_info_base::atomic_rmw_seq_cst, mo_consume, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);
+            case mo_acquire: return compare_swap_impl<spurious_failures, mo_seq_cst, &thread_info_base::atomic_rmw_seq_cst, mo_acquire, &thread_info_base::atomic_load_acquire_rmw>(cmp, xchg, info);
+            case mo_seq_cst: return compare_swap_impl<spurious_failures, mo_seq_cst, &thread_info_base::atomic_rmw_seq_cst, mo_seq_cst, &thread_info_base::atomic_load_seq_cst_rmw>(cmp, xchg, info);
+            default: RL_VERIFY(false); return false;
+            }
+        }
+        }
+
+        RL_VERIFY(false);
+        return false;
+    }
+
+    T exchange(T xchg, memory_order mo, debug_info_param info)
+    {
+        return rmw(rmw_type_t<rmw_type_swap>(), xchg, mo, info);
+    }
+
+    T fetch_add(typename atomic_add_type<T>::type value, memory_order mo, debug_info_param info)
+    {
+        return rmw(rmw_type_t<rmw_type_add>(), value, mo, info);
+    }
+
+    T fetch_sub(typename atomic_add_type<T>::type value, memory_order mo, debug_info_param info)
+    {
+        return rmw(rmw_type_t<rmw_type_sub>(), value, mo, info);
+    }
+
+    T fetch_and(T value, memory_order mo, debug_info_param info)
+    {
+        return rmw(rmw_type_t<rmw_type_and>(), value, mo, info);
+    }
+
+    T fetch_or(T value, memory_order mo, debug_info_param info)
+    {
+        return rmw(rmw_type_t<rmw_type_or>(), value, mo, info);
+    }
+
+    T fetch_xor(T value, memory_order mo, debug_info_param info)
+    {
+        return rmw(rmw_type_t<rmw_type_xor>(), value, mo, info);
+    }
+
+    template<typename Y, rmw_type_e type>
+    RL_INLINE
+    T rmw(rmw_type_t<type>, Y op, memory_order mo, debug_info_param info)
+    {
+        switch (mo)
+        {
+        case mo_relaxed: return rmw_impl<Y, mo_relaxed, &thread_info_base::atomic_rmw_relaxed>(rmw_type_t<type>(), op, info);
+        case mo_consume: return rmw_impl<Y, mo_consume, &thread_info_base::atomic_rmw_acquire>(rmw_type_t<type>(), op, info);
+        case mo_acquire: return rmw_impl<Y, mo_acquire, &thread_info_base::atomic_rmw_acquire>(rmw_type_t<type>(), op, info);
+        case mo_release: return rmw_impl<Y, mo_release, &thread_info_base::atomic_rmw_release>(rmw_type_t<type>(), op, info);
+        case mo_acq_rel: return rmw_impl<Y, mo_acq_rel, &thread_info_base::atomic_rmw_acq_rel>(rmw_type_t<type>(), op, info);
+        case mo_seq_cst: return rmw_impl<Y, mo_seq_cst, &thread_info_base::atomic_rmw_seq_cst>(rmw_type_t<type>(), op, info);
+        }
+
+        RL_VERIFY(false);
+        return T();
+    }
+
+    unpark_reason wait(context& c, bool is_timed, bool allow_spurious_wakeup, debug_info_param info)
+    {
+        sign_.check(info);
+        return c.threadx_->atomic_wait(impl_, is_timed, allow_spurious_wakeup, info);
+    }
+
+    thread_id_t wake(context& c, thread_id_t count, debug_info_param info)
+    {
+        sign_.check(info);
+        return c.threadx_->atomic_wake(impl_, count, info);
+    }
+
+private:
+    T value_;
+    T history_[atomic_history_size];
+    atomic_data* impl_;
+    unsigned last_index_;
+    signature<987654321> sign_;
+    bool initialized_;
+    bool already_failed_;
+
+    template<memory_order mo, unsigned (thread_info_base::*impl)(atomic_data* RL_RESTRICT data)>
+    T load_impl(debug_info_param info) const
+    {
+        context& c = ctx();
+        c.sched();
+        sign_.check(info);
+
+        if (false == c.invariant_executing)
+        {
+            unsigned const index = (c.threadx_->*impl)(impl_);
+            if ((unsigned)-1 == index)
+            {
+                RL_HIST(atomic_load_event<T>) {this, T(), mo, false} RL_HIST_END();
+                RL_ASSERT_IMPL(false, test_result_unitialized_access, "", info);
+            }
+            T const v = history_[index];
+
+            RL_HIST(atomic_load_event<T>) {this, v, mo, last_index_ != index} RL_HIST_END();
+
+            return v;
+        }
+        else
+        {
+            if (false == initialized_)
+            {
+                RL_HIST(atomic_load_event<T>) {this, T(), mo, false} RL_HIST_END();
+                RL_ASSERT_IMPL(false, test_result_unitialized_access, "", info);
+            }
+            return value_;
+        }
+    }
+
+    template<memory_order mo, unsigned (thread_info_base::*impl)(atomic_data* RL_RESTRICT data)>
+    void store_impl(T v, debug_info_param info)
+    {
+        context& c = ctx();
+        RL_VERIFY(false == c.invariant_executing);
+        c.sched();
+        sign_.check(info);
+
+        unsigned const index = (c.threadx_->*impl)(impl_);
+
+        T const prev = value_;
+        last_index_ = index;
+        history_[index] = v;
+        value_ = v;
+        initialized_ = true;
+        RL_HIST(atomic_store_event<T>) {this, prev, v, mo} RL_HIST_END();
+    }
+
+    template<bool spurious_failures, memory_order mo, unsigned (thread_info_base::*impl)(atomic_data* RL_RESTRICT data, bool&), memory_order failure_mo, unsigned (thread_info_base::*failure_impl)(atomic_data* RL_RESTRICT data)>
+    bool compare_swap_impl(T& cmp, T xchg, debug_info_param info)
+    {
+        context& c = ctx();
+        RL_VERIFY(false == c.invariant_executing);
+        c.sched();
+        sign_.check(info);
+
+        if (false == initialized_)
+        {
+            RL_HIST(atomic_load_event<T>) {this, T(), mo, false} RL_HIST_END();
+            RL_ASSERT_IMPL(false, test_result_unitialized_access, "", info);
+        }
+
+        bool success = false;
+        bool spurious_failure = false;
+        bool aba = false;
+
+        T const cmpv = cmp;
+        T const current = value_;
+        if (current == cmpv)
+        {
+            if (val(spurious_failures))
+            {
+                if (c.is_random_sched())
+                {
+                    spurious_failure = (0 == c.rand(4, sched_type_cas_fail));
+                }
+                else
+                {
+                    if (false == already_failed_)
+                    {
+                        spurious_failure = 0 == c.rand(2, sched_type_cas_fail);
+                        if (spurious_failure)
+                            already_failed_ = true;
+                    }
+                }
+            }
+
+            if (false == spurious_failure)
+            {
+                success = true;
+                unsigned const index = (c.threadx_->*impl)(impl_, aba);
+                value_ = xchg;
+                last_index_ = index;
+                history_[index] = xchg;
+            }
+        }
+
+        if (false == success)
+        {
+            (c.threadx_->*failure_impl)(impl_);
+            cmp = current;
+        }
+
+        RL_HIST(atomic_cas_event<T>) {RL_INFO, this, current, cmpv, xchg, mo, success, spurious_failure, aba} RL_HIST_END();
+
+        return success;
+    }
+
+    template<typename
Y, memory_order mo, unsigned (thread_info_base::*impl)(atomic_data* RL_RESTRICT data, bool&), rmw_type_e type> + T rmw_impl(rmw_type_t<type>, Y op, debug_info_param info) + { + context& c = ctx(); + RL_VERIFY(false == c.invariant_executing); + c.sched(); + sign_.check(info); + + if (false == initialized_) + { + RL_HIST(atomic_load_event<T>) {this, T(), mo, false} RL_HIST_END(); + RL_ASSERT_IMPL(false, test_result_unitialized_access, "", info); + } + + bool aba; + unsigned const index = (c.threadx_->*impl)(impl_, aba); + + T const prev_value = value_; + T const new_value = perform_rmw(rmw_type_t<type>(), prev_value, op); + value_ = new_value; + last_index_ = index; + history_[index] = new_value; + + typedef atomic_rmw_event<T, Y> atomic_rmw_event_t; + RL_HIST(atomic_rmw_event_t) {RL_INFO, this, prev_value, op, new_value, mo, type} RL_HIST_END(); + + return prev_value; + } + + RL_NOCOPY(generic_atomic); +}; + + + + + +template<typename T> +class atomic : public generic_atomic<T, false> +{ +public: + atomic() + { + } + + /*explicit*/ atomic(T value) + { + this->store(value, mo_relaxed, $); + } + + atomic_proxy_const<T> operator () (debug_info_param info) const /*volatile*/ + { + return atomic_proxy_const<T>(*this, info); + } + + atomic_proxy<T> operator () (debug_info_param info) /*volatile*/ + { + return atomic_proxy<T>(*this, info); + } + + bool is_lock_free() const /*volatile*/ + { + return true; + } + + friend class atomic_proxy<T>; + friend class atomic_proxy_const<T>; + + RL_NOCOPY(atomic); +}; + + + + +typedef atomic<bool> atomic_bool; +typedef atomic<void*> atomic_address; + +typedef atomic<char> atomic_char; +typedef atomic<signed char> atomic_schar; +typedef atomic<unsigned char> atomic_uchar; +typedef atomic<short> atomic_short; +typedef atomic<unsigned short> atomic_ushort; +typedef atomic<int> atomic_int; +typedef atomic<unsigned int> atomic_uint; +typedef atomic<long> atomic_long; +typedef atomic<unsigned long> atomic_ulong; +typedef atomic<long long> atomic_llong; +typedef atomic<unsigned long long> atomic_ullong; +//typedef atomic<char16_t> atomic_char16_t; +//typedef atomic<char32_t> atomic_char32_t; +typedef atomic<wchar_t> atomic_wchar_t; + +//typedef atomic<int_least8_t> atomic_int_least8_t; +//typedef atomic<uint_least8_t> atomic_uint_least8_t; +//typedef atomic<int_least16_t> atomic_int_least16_t; +//typedef atomic<uint_least16_t> atomic_uint_least16_t; +//typedef atomic<int_least32_t> atomic_int_least32_t; +//typedef atomic<uint_least32_t> atomic_uint_least32_t; +//typedef atomic<int_least64_t> atomic_int_least64_t; +//typedef atomic<uint_least64_t> atomic_uint_least64_t; +//typedef atomic<int_fast8_t> atomic_int_fast8_t; +//typedef atomic<uint_fast8_t> atomic_uint_fast8_t; +//typedef atomic<int_fast16_t> atomic_int_fast16_t; +//typedef atomic<uint_fast16_t> atomic_uint_fast16_t; +//typedef atomic<int_fast32_t> atomic_int_fast32_t; +//typedef atomic<uint_fast32_t> atomic_uint_fast32_t; +//typedef atomic<int_fast64_t> atomic_int_fast64_t; +//typedef atomic<uint_fast64_t> atomic_uint_fast64_t; +typedef atomic<intptr_t> atomic_intptr_t; +typedef atomic<uintptr_t> atomic_uintptr_t; +typedef atomic<size_t> atomic_size_t; +//typedef atomic<ssize_t> atomic_ssize_t; +typedef atomic<ptrdiff_t> atomic_ptrdiff_t; +//typedef atomic<intmax_t> atomic_intmax_t; +//typedef atomic<uintmax_t> atomic_uintmax_t; + + + + +template<thread_id_t thread_count> +struct atomic_data_impl : atomic_data +{ + typedef thread_info<thread_count> thread_info_t; + + struct history_record + { + timestamp_t 
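+// End-to-end sketch of driving one of these atomics from a test. Assumes
+// rl::test_suite from the test framework and that the atomic_proxy returned
+// by operator()($) forwards store/load with the captured debug info; names
+// are illustrative:
+//
+//   struct publish_test : rl::test_suite<publish_test, 2>
+//   {
+//       rl::atomic<int> flag;
+//       void before() { flag($).store(0, rl::mo_relaxed); }
+//       void thread(unsigned index)
+//       {
+//           if (0 == index)
+//               flag($).store(1, rl::mo_release);
+//           else
+//               flag($).load(rl::mo_acquire);
+//       }
+//   };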
acq_rel_order_ [thread_count]; + timestamp_t last_seen_order_ [thread_count]; + + bool busy_; + bool seq_cst_; + thread_id_t thread_id_; + timestamp_t acq_rel_timestamp_; + }; + + static size_t const history_size = atomic_history_size; + aligned<history_record> history_ [history_size]; + unsigned current_index_; + waitset<thread_count> futex_ws_; + sync_var<thread_count> futex_sync_; + + atomic_data_impl() + { + current_index_ = 0; + history_record& rec = history_[0]; + history_[atomic_history_size - 1].busy_ = false; + + rec.busy_ = false; + rec.seq_cst_ = false; + rec.thread_id_ = (thread_id_t)-1; + } + + atomic_data_impl(thread_info_t& th) + { + current_index_ = 0; + history_[atomic_history_size - 1].busy_ = false; + + history_record& rec = history_[0]; + rec.busy_ = true; + rec.seq_cst_ = false; + rec.thread_id_ = th.index_; + + th.own_acq_rel_order_ += 1; + rec.acq_rel_timestamp_ = th.own_acq_rel_order_; + + foreach<thread_count>(rec.acq_rel_order_, assign_zero); + foreach<thread_count>(rec.last_seen_order_, assign<(timestamp_t)-1>); + rec.last_seen_order_[th.index_] = th.own_acq_rel_order_; + } +}; + + +} + + + +#endif diff --git a/libs/relacy/relacy/atomic_events.hpp b/libs/relacy/relacy/atomic_events.hpp @@ -0,0 +1,148 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. + */ + +#ifndef RL_ATOMIC_EVENTS_HPP +#define RL_ATOMIC_EVENTS_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "base.hpp" +#include "memory_order.hpp" +#include "rmw.hpp" + + +namespace rl +{ + +template<typename T> class atomic; +template<typename T, bool strong_init> class generic_atomic; + +template<typename T> +struct atomic_add_type +{ + typedef T type; + typedef T output_type; +}; + +template<typename T> +struct atomic_add_type<T*> +{ + typedef ptrdiff_t type; + typedef void* output_type; +}; + + + + +template<typename T> +struct atomic_cas_event +{ + typedef typename atomic_add_type<T>::output_type type; + + debug_info var_info_; + void const* var_addr_; + type cur_value_; + type cmp_value_; + type xchg_value_; + memory_order mo_; + bool success_; + bool spurious_failure_; + bool aba_; + + void output(std::ostream& s) const + { + s << "<" << std::hex << var_addr_ << std::dec << ">" + << " CAS " + << (success_ ? "succ " : "fail ") + << (spurious_failure_ ? "[SPURIOUSLY] " : "") + << (aba_ ? "[ABA] " : "") + << "orig=" << cur_value_ + << ", cmp=" << cmp_value_ + << ", xchg=" << xchg_value_ + << ", order=" << format(mo_); + } +}; + + + + +template<typename T> +struct atomic_load_event +{ + typedef typename atomic_add_type<T>::output_type type; + + void const* var_addr_; + type value_; + memory_order mo_; + bool not_current_; + + void output(std::ostream& s) const + { + s << "<" << std::hex << var_addr_ << std::dec << ">" + << " atomic load, value=" << value_ + << (not_current_ ? 
" [NOT CURRENT]" : "") + << ", order=" << format(mo_); + } +}; + + + + +template<typename T> +struct atomic_store_event +{ + typedef typename atomic_add_type<T>::output_type type; + + void const* var_addr_; + type prev_value_; + type value_; + memory_order mo_; + + void output(std::ostream& s) const + { + s << "<" << std::hex << var_addr_ << std::dec << ">" + << " atomic store, value=" << value_ + << ", (prev value=" << prev_value_ << ")" + << ", order=" << format(mo_); + } +}; + + + + +template<typename T, typename Y> +struct atomic_rmw_event +{ + typedef typename atomic_add_type<T>::output_type type; + + debug_info var_info_; + void const* var_addr_; + type prev_value_; + Y op_value_; + type new_value_; + memory_order mo_; + rmw_type_e type_; + + void output(std::ostream& s) const + { + s << "<" << std::hex << var_addr_ << std::dec << ">" + << " " << format(type_) << " " + << ", prev=" << prev_value_ + << ", arg=" << op_value_ + << ", new=" << new_value_ + << ", order=" << format(mo_); + } +}; + + +} + + +#endif diff --git a/libs/relacy/relacy/atomic_fence.hpp b/libs/relacy/relacy/atomic_fence.hpp @@ -0,0 +1,83 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. + */ + +#ifndef RL_FENCE_HPP +#define RL_FENCE_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "base.hpp" +#include "context.hpp" +#include "memory_order.hpp" + + +namespace rl +{ + + +struct atomic_fence_event +{ + memory_order mo_; + bool is_thread_fence_; + + void output(std::ostream& s) const + { + s << (is_thread_fence_ ? "" : "compiler ") + << format(mo_) << " fence"; + } +}; + + + + +RL_INLINE +void atomic_thread_fence(memory_order mo, debug_info_param info) +{ + context& c = ctx(); + RL_VERIFY(false == c.invariant_executing); + + switch (mo) + { + case mo_relaxed: + RL_VERIFY(false); + break; + case mo_consume: + case mo_acquire: + c.atomic_thread_fence_acquire(); + break; + case mo_release: + c.atomic_thread_fence_release(); + break; + case mo_acq_rel: + c.atomic_thread_fence_acq_rel(); + break; + case mo_seq_cst: + c.atomic_thread_fence_seq_cst(); + break; + } + + RL_HIST(atomic_fence_event) {mo, true} RL_HIST_END(); +} + + + + +RL_INLINE +void atomic_signal_fence(memory_order mo, debug_info_param info) +{ + context& c = ctx(); + RL_HIST(atomic_fence_event) {mo, false} RL_HIST_END(); +} + + +} + + +#endif diff --git a/libs/relacy/relacy/backoff.hpp b/libs/relacy/relacy/backoff.hpp @@ -0,0 +1,57 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. 
+ */ + +#ifndef RL_BACKOFF_HPP +#define RL_BACKOFF_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "base.hpp" +#include "context_base.hpp" + + +namespace rl +{ + + +inline void yield(unsigned count, debug_info_param info) +{ + ctx().yield(count, info); +} + + +template<unsigned factor_t, unsigned add_t> +class backoff_t +{ +public: + backoff_t() + : count_(1) + { + } + + void yield(debug_info_param info) + { + rl::yield(count_, info); + count_ = count_ * factor_t + add_t; + } + +private: + unsigned count_; +}; + + +typedef backoff_t<1, 0> backoff; +typedef backoff_t<1, 1> linear_backoff; +typedef backoff_t<2, 0> exp_backoff; + + +} + +#endif diff --git a/libs/relacy/relacy/base.hpp b/libs/relacy/relacy/base.hpp @@ -0,0 +1,144 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. + */ + +#ifndef RL_BASE_HPP +#define RL_BASE_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "pch.hpp" +#include "platform.hpp" + +namespace rl +{ +size_t const subsequent_timed_wait_limit = 4; +} + +#define RL_TEST + +#ifdef RL_JAVA_MODE +# define RL_GC +# define RL_NO_MALLOC +# define RL_JAVA_API +# define RL_JAVA_MM +#endif + +#ifdef RL_CLI_MODE +# define RL_GC +# define RL_NO_MALLOC +# define RL_CLI_API +# define RL_CLI_MM +#endif + +#ifdef RL_POSIX_MODE +# define RL_POSIX_API +#endif + +#ifdef RL_WIN_MODE +# define RL_WIN_API +#endif + +#ifdef RL_CPP_MODE +# define RL_CPP_API +# define RL_CPP_MM +#endif + +#if defined(RL_JAVA_MM) || defined(RL_CLI_MM) +# define RL_IMPROVED_SEQ_CST_FENCE +# define RL_IMPROVED_SEQ_CST_RMW +#endif + +namespace rl +{ + +#define RL_NOCOPY(CLASS) \ + private: \ + CLASS(CLASS const&); \ + CLASS& operator = (CLASS const&); +/**/ + + +template<typename T = void> +class nocopy +{ + nocopy(nocopy const&); + nocopy& operator = (nocopy const&); + +protected: + nocopy() {} +}; + + +template<size_t sz, size_t base = 4> +struct align_pad +{ + template<bool perfect, bool fit, int fake> struct helper + { + struct type { char pad [base - sz]; }; + }; + + template<int fake> struct helper<true, true, fake> + { + struct type {}; + }; + + template<bool perfect, int fake> struct helper<perfect, false, fake> + { + typedef typename align_pad<sz, base * 2>::type type; + }; + + typedef typename helper<sz == base, sz <= base, 0>::type type; +}; + + +template<typename T> +struct aligned : T, align_pad<sizeof(T)>::type +{}; + +template<typename T> +T val(T x) +{ + return x; +} + +} + + +#include "defs.hpp" + + +#define RL_INFO ::rl::debug_info(__FUNCTION__, __FILE__, __LINE__) +#define $ RL_INFO + + +#ifdef RL_DO_ASSERT +# if RL_DO_ASSERT +# define RL_DO_ASSERT_IMPL +# endif +#else +# ifdef _DEBUG +# define RL_DO_ASSERT_IMPL +# endif +#endif + +#ifdef _MSC_VER +# define RL_INT3() __debugbreak(); abort() +#else +# define RL_INT3() abort() +#endif + +#ifdef RL_DO_ASSERT_IMPL +# define RL_VERIFY(x) do { if (!((void)0, (x))) { \ + ::rl::assert_failed(#x, $); RL_INT3(); } } while ((void)0, 0) +#else +# define RL_VERIFY(x) (void)0 +#endif + +#endif diff --git a/libs/relacy/relacy/cli.hpp b/libs/relacy/relacy/cli.hpp @@ -0,0 +1,52 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. 
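+// Spin-wait sketch using the backoff helpers above; exp_backoff doubles the
+// yield count on every spin, and yields feed the scheduler's livelock
+// detection. `ready` is an illustrative rl::atomic<int>:
+//
+//   rl::exp_backoff b;
+//   while (0 == ready.load(rl::mo_acquire, $))
+//       b.yield($);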
+ * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. + */ + +#ifndef RL_CLI_HPP +#define RL_CLI_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "base.hpp" +#include "context_base.hpp" +#include "atomic_fence.hpp" + + +namespace rl +{ + + +struct Thread +{ + static void MemoryBarrier(debug_info_param info) + { + atomic_thread_fence(mo_seq_cst, info); + } + + template<typename T> + static T VolatileRead(generic_atomic<T, true> const& v, debug_info_param info) + { + return v.load(mo_acquire, info); + } + + template<typename T> + static void VolatileWrite(generic_atomic<T, true>& v, T x, debug_info_param info) + { + v.store(x, mo_release, info); + } + + static void SpinWait(int iterations, debug_info_param info) + { + ctx().yield(iterations, info); + } +}; + +} + +#endif diff --git a/libs/relacy/relacy/cli_interlocked.hpp b/libs/relacy/relacy/cli_interlocked.hpp @@ -0,0 +1,67 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. + */ + +#ifndef RL_CLI_INTERLOCKED_HPP +#define RL_CLI_INTERLOCKED_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "base.hpp" +#include "atomic.hpp" + + +namespace rl +{ + + struct Interlocked + { + template<typename T> + static T Add(generic_atomic<T, true>& v, T x, debug_info_param info) + { + T result = v.rmw(rmw_type_t<rmw_type_add>(), x, mo_seq_cst, info) + x; + return result; + } + + template<typename T> + static T CompareExchange(generic_atomic<T, true>& v, T xchg, T cmp, debug_info_param info) + { + v.compare_exchange(bool_t<false>(), cmp, xchg, mo_seq_cst, mo_seq_cst, info); + return cmp; + } + + template<typename T> + static T Increment(generic_atomic<T, true>& v, debug_info_param info) + { + return Add(v, (T)1, info); + } + + template<typename T> + static T Decrement(generic_atomic<T, true>& v, debug_info_param info) + { + return Add(v, (T)-1, info); + } + + template<typename T> + static T Exchange(generic_atomic<T, true>& v, T x, debug_info_param info) + { + T result = v.rmw(rmw_type_t<rmw_type_swap>(), x, mo_seq_cst, info); + return result; + } + + template<typename T> + static T Read(generic_atomic<T, true> const& v, debug_info_param info) + { + return v.load(mo_acquire, info); + } + }; + +} + +#endif diff --git a/libs/relacy/relacy/cli_var.hpp b/libs/relacy/relacy/cli_var.hpp @@ -0,0 +1,158 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. 
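+// Sketch of the CLI-flavored layer above (RL_CLI_MODE); `v` is an
+// illustrative rl::nvolatile<int> from cli_volatile.hpp below, which derives
+// from generic_atomic<int, true>:
+//
+//   rl::Thread::VolatileWrite(v, 1, $);
+//   int seen = rl::Thread::VolatileRead(v, $);
+//   int prev = rl::Interlocked::CompareExchange(v, 2, 1, $); // (xchg, cmp)
+//   rl::Thread::MemoryBarrier($);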
+ */ + +#ifndef RL_CLI_VAR_HPP +#define RL_CLI_VAR_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "base.hpp" +#include "atomic.hpp" + + +namespace rl +{ + +template<typename T> class nvar; + + +template<typename T> +class nvar_proxy +{ +public: + typedef typename atomic_add_type<T>::type add_type; + template<typename Y> friend class nvar; + + operator T () const + { + return load(); + } + + T operator = (T value) + { + store(value); + return value; + } + + T operator = (nvar_proxy const& r) + { + T const value = r.load(); + store(value); + return *this; + } + + T operator ++ (int) + { + T tmp = load(); + store(tmp + 1); + return tmp; + } + + T operator -- (int) + { + T tmp = load(); + store(tmp - 1); + return tmp; + } + + T operator ++ () + { + T tmp = load(); + store(tmp + 1); + return tmp + 1; + } + + T operator -- () + { + T tmp = load(); + store(tmp - 1); + return tmp - 1; + } + + T operator += (add_type value) + { + T tmp = load(); + store(tmp + value); + return tmp + value; + } + + T operator -= (add_type value) + { + T tmp = load(); + store(tmp - value); + return tmp - value; + } + +private: + nvar<T>& var_; + debug_info info_; + + nvar_proxy(nvar<T>& var, debug_info_param info) + : var_(var) + , info_(info) + { + } + + T load() const + { + return var_.load(mo_relaxed, info_); + } + + void store(T value) + { + var_.store(value, mo_relaxed, info_); + } +}; + + + + +template<typename T> +class nvar : public generic_atomic<T, true> +{ +public: + typedef nvar_proxy<T> proxy_t; + friend class nvar_proxy<T>; + + nvar() + { + } + + explicit nvar(T value) + { + this->store(value, mo_relaxed, $); + } + + nvar(nvar const& r) + { + T const value = r.load(mo_relaxed, $); + this->store(value, mo_relaxed, $); + } + + nvar(proxy_t const& r) + { + T const value = r.load(); + this->store(value, mo_relaxed, r.info_); + } + + proxy_t operator () (debug_info_param info) + { + return proxy_t(*this, info); + } + +private: + nvar& operator = (nvar const&); +}; + + + +} + +#endif diff --git a/libs/relacy/relacy/cli_volatile.hpp b/libs/relacy/relacy/cli_volatile.hpp @@ -0,0 +1,161 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. + */ + +#ifndef RL_CLI_VOLATILE_HPP +#define RL_CLI_VOLATILE_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "base.hpp" +#include "atomic.hpp" + + +//!!! fix Java volatiles! 
+// they must be modeled as seq_cst stores/loads + +namespace rl +{ + +template<typename T> class nvolatile; + + +template<typename T> +class nvolatile_proxy +{ +public: + typedef typename atomic_add_type<T>::type add_type; + template<typename Y> friend class nvolatile; + + operator T () const + { + return load(); + } + + T operator = (T value) + { + store(value); + return value; + } + + T operator = (nvolatile_proxy const& r) + { + T const value = r.load(); + store(value); + return *this; + } + + T operator ++ (int) + { + T tmp = load(); + store(tmp + 1); + return tmp; + } + + T operator -- (int) + { + T tmp = load(); + store(tmp - 1); + return tmp; + } + + T operator ++ () + { + T tmp = load(); + store(tmp + 1); + return tmp + 1; + } + + T operator -- () + { + T tmp = load(); + store(tmp - 1); + return tmp - 1; + } + + T operator += (add_type value) + { + T tmp = load(); + store(tmp + value); + return tmp + value; + } + + T operator -= (add_type value) + { + T tmp = load(); + store(tmp - value); + return tmp - value; + } + +private: + nvolatile<T>& var_; + debug_info info_; + + nvolatile_proxy(nvolatile<T>& var, debug_info_param info) + : var_(var) + , info_(info) + { + } + + T load() const + { + return var_.load(mo_acquire, info_); + } + + void store(T value) + { + var_.store(value, mo_release, info_); + } +}; + + + + +template<typename T> +class nvolatile : public generic_atomic<T, true> +{ +public: + typedef nvolatile_proxy<T> proxy_t; + friend class nvolatile_proxy<T>; + + nvolatile() + { + } + + explicit nvolatile(T value) + { + //??? whether here must be mo_relaxed or mo_release? + this->store(value, mo_release, $); + } + + nvolatile(nvolatile const& r) + { + T const value = r.load(mo_acquire, $); + //??? whether here must be mo_relaxed or mo_release? + this->store(value, mo_release, $); + } + + nvolatile(proxy_t const& r) + { + T const value = r.var_.load(mo_acquire, r.info_); + //??? whether here must be mo_relaxed or mo_release? + this->store(value, mo_release, r.info_); + } + + proxy_t operator () (debug_info_param info) + { + return proxy_t(*this, info); + } +}; + + + +} + +#endif diff --git a/libs/relacy/relacy/context.hpp b/libs/relacy/relacy/context.hpp @@ -0,0 +1,1291 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. + */ + +#ifndef RL_CONTEXT_HPP +#define RL_CONTEXT_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "base.hpp" +#include "thread_local_ctx.hpp" +#include "context_base.hpp" +#include "thread.hpp" +#include "history.hpp" +#include "memory.hpp" +#include "test_result.hpp" +#include "slab_allocator.hpp" +#include "test_params.hpp" +#include "random.hpp" +#include "foreach.hpp" + +#include "random_scheduler.hpp" +#include "full_search_scheduler.hpp" +#include "context_bound_scheduler.hpp" + + + +namespace rl +{ + +template<thread_id_t thread_count> class generic_mutex_data_impl; +template<thread_id_t thread_count> class condvar_data_impl; +template<thread_id_t thread_count> class sema_data_impl; +template<thread_id_t thread_count> class event_data_impl; + + +struct park_event +{ + bool is_timed_; + bool allow_spurious_; + + void output(std::ostream& s) const + { + s << "blocking current thread" << (is_timed_ ? 
" [timed]" : ""); + } +}; + +struct unpark_event +{ + thread_id_t thread_; + + void output(std::ostream& s) const + { + s << "unblocking thread " << thread_; + } +}; + +struct yield_event +{ + unsigned count_; + + void output(std::ostream& s) const + { + s << "yield(" << count_ << ")"; + } +}; + + +/* +template<typename test_t, typename scheduler_t> +struct context_persistent +{ + static thread_id_t const thread_count = test_t::params::thread_count; + fiber_t fibers_ [thread_count]; + memory_mgr memory_; + + context_persistent() + { + for (thread_id_t i = 0; i != thread_count; ++i) + { + create_fiber(fibers_[i], &context_impl<test_t, scheduler_t>::fiber_proc, (void*)(intptr_t)i); + } + } + + ~context_persistent() + { + for (thread_id_t i = 0; i != thread_count; ++i) + { + delete_fiber(fibers_[i]); + } + } +}; +*/ + + +template<typename test_t, typename scheduler_t> +class context_impl + : thread_local_contxt_impl<context_addr_hash_impl<context, test_t::params::thread_count>, test_t::params::thread_count> +{ +private: + typedef thread_local_contxt_impl + <context_addr_hash_impl<context, test_t::params::thread_count>, + test_t::params::thread_count> + base_t; + typedef typename scheduler_t::shared_context_t shared_context_t; + + using base_t::params_; + using base_t::history_; + using base_t::threadx_; + using base_t::disable_preemption_; + using base_t::disable_alloc_; + using base_t::invariant_executing; + + static thread_id_t const main_thread_id = -1; + static thread_id_t const static_thread_count = test_t::params::static_thread_count; + static thread_id_t const dynamic_thread_count = test_t::params::dynamic_thread_count; + static thread_id_t const thread_count = test_t::params::thread_count; + + iteration_t current_iter_; + test_result_e test_result_; + string test_result_str_; + fiber_t main_fiber_; + bool special_function_executing; + memory_mgr memory_; + iteration_t start_iteration_; + size_t sched_count_; + scheduler_t sched_; + shared_context_t& sctx_; + random_generator rand_; + test_t* current_test_suite; + bool current_test_suite_constructed; + bool first_thread_; + timestamp_t seq_cst_fence_order_ [thread_count]; + + aligned<thread_info<thread_count> > threads_ [thread_count]; + + thread_info<thread_count>& threadi() + { + return *static_cast<thread_info<thread_count>*>(threadx_); + } + + slab_allocator<atomic_data_impl<thread_count> >* atomic_alloc_; + slab_allocator<var_data_impl<thread_count> >* var_alloc_; + slab_allocator<generic_mutex_data_impl<thread_count> >* mutex_alloc_; + slab_allocator<condvar_data_impl<thread_count> >* condvar_alloc_; + slab_allocator<sema_data_impl<thread_count> >* sema_alloc_; + slab_allocator<event_data_impl<thread_count> >* event_alloc_; + + virtual atomic_data* atomic_ctor(void* ctx) + { + return new (atomic_alloc_->alloc(ctx)) atomic_data_impl<thread_count> (); + } + + virtual void atomic_dtor(atomic_data* data) + { + static_cast<atomic_data_impl<thread_count>*>(data)->~atomic_data_impl<thread_count>(); + atomic_alloc_->free(static_cast<atomic_data_impl<thread_count>*>(data)); + } + + virtual var_data* var_ctor() + { + return new (var_alloc_->alloc()) var_data_impl<thread_count> (); + } + + virtual void var_dtor(var_data* data) + { + static_cast<var_data_impl<thread_count>*>(data)->~var_data_impl<thread_count>(); + var_alloc_->free(static_cast<var_data_impl<thread_count>*>(data)); + } + + virtual unpark_reason wfmo_park(void** ws, + win_waitable_object** wo, + size_t count, + bool wait_all, + bool is_timed, + debug_info_param info) + { + 
return waitset<thread_count>::park_current(*this, + reinterpret_cast<waitset<thread_count>**>(ws), + wo, count, wait_all, is_timed, true, info); + } + +public: + context_impl(test_params& params, shared_context_t& sctx) + : base_t(thread_count, params) + , current_iter_(0) + , start_iteration_(1) + , sched_(params, sctx, dynamic_thread_count) + , sctx_(sctx) + { + this->context::seq_cst_fence_order_ = this->seq_cst_fence_order_; + + current_test_suite = (test_t*)(::malloc)(sizeof(test_t)); + current_test_suite_constructed = false; + + test_result_ = test_result_success; + threadx_ = 0; + special_function_executing = false; + invariant_executing = false; + + create_main_fiber(main_fiber_); + set_low_thread_prio(); + + if (0 == val(thread_count)) + { + throw std::logic_error("no threads created"); + } + + atomic_alloc_ = new slab_allocator<atomic_data_impl<thread_count> >(); + var_alloc_ = new slab_allocator<var_data_impl<thread_count> >(); + mutex_alloc_ = new slab_allocator<generic_mutex_data_impl<thread_count> >(); + condvar_alloc_ = new slab_allocator<condvar_data_impl<thread_count> >(); + sema_alloc_ = new slab_allocator<sema_data_impl<thread_count> >(); + event_alloc_ = new slab_allocator<event_data_impl<thread_count> >(); + + for (thread_id_t i = 0; i != thread_count; ++i) + { + new (&threads_[i]) thread_info<thread_count> (i); + threads_[i].ctx_ = this; + } + + for (thread_id_t i = 0; i != thread_count; ++i) + { + //threads_[i].fiber_ = persistent.fibers_[i]; + create_fiber(threads_[i].fiber_, &context_impl::fiber_proc, (void*)(intptr_t)i); + } + + disable_alloc_ = 0; + } + + ~context_impl() + { + disable_alloc_ += 1; + + for (thread_id_t i = 0; i != thread_count; ++i) + { + delete_fiber(threads_[i].fiber_); + } + + delete_main_fiber(main_fiber_); + + // there can still be atomic loads and stores etc., + // so this is not a good place to call user code + //destroy_current_test_suite(); + //::free(current_test_suite); + + delete atomic_alloc_; + delete var_alloc_; + delete mutex_alloc_; + delete condvar_alloc_; + delete sema_alloc_; + delete event_alloc_; + } + + void construct_current_test_suite() + { + RL_VERIFY(false == current_test_suite_constructed); + new (current_test_suite) test_t (); + current_test_suite_constructed = true; + } + + void destroy_current_test_suite() + { + if (current_test_suite_constructed) + { + current_test_suite->~test_t(); + current_test_suite_constructed = false; + } + } + + virtual void* alloc(size_t size, bool is_array, debug_info_param info) + { + disable_alloc_ += 1; +#ifndef RL_GC + void* p = memory_.alloc(size); +#else + void* p = memory_.alloc(size, (void(*)(void*))0); +#endif + disable_alloc_ -= 1; + RL_HIST_CTX(memory_alloc_event) {p, size, is_array} RL_HIST_END(); + return p; + } + +#ifdef RL_GC + virtual void* alloc(size_t size, bool is_array, void(*dtor)(void*), debug_info_param info) + { + disable_alloc_ += 1; + void* p = memory_.alloc(size, dtor); + disable_alloc_ -= 1; + RL_HIST_CTX(memory_alloc_event) {p, size, is_array} RL_HIST_END(); + return p; + } +#endif + + virtual void free(void* p, bool is_array, debug_info_param info) + { + RL_HIST_CTX(memory_free_event) {p, is_array} RL_HIST_END(); +#ifndef RL_GC + bool const defer = (0 == sched_.rand(this->is_random_sched() ?
4 : 2, sched_type_mem_realloc)); +#else + bool const defer = false; +#endif + disable_alloc_ += 1; + if (false == memory_.free(p, defer)) + fail_test("incorrect address passed to free() function", test_result_double_free, info); + disable_alloc_ -= 1; + } + + size_t prev_alloc_size_; + debug_info last_info_; + + virtual void* alloc(size_t size) + { + if (disable_alloc_) + return (::malloc)(size); + + prev_alloc_size_ = size; + disable_alloc_ += 1; +#ifndef RL_GC + void* p = (memory_.alloc)(size); +#else + void* p = (memory_.alloc)(size, 0); +#endif + disable_alloc_ -= 1; + return p; + } + + virtual size_t prev_alloc_size() + { + size_t sz = prev_alloc_size_; + prev_alloc_size_ = 0; + return sz; + } + + virtual void set_debug_info(debug_info_param info) + { + last_info_ = info; + } + + virtual void free(void* p) + { + if (disable_alloc_) + { + (::free)(p); + return; + } + + disable_alloc_ += 1; + debug_info const& info = last_info_; + RL_HIST_CTX(memory_free_event) {p, false} RL_HIST_END(); +#ifndef RL_GC + bool const defer = (0 == sched_.rand(this->is_random_sched() ? 4 : 2, sched_type_mem_realloc)); +#else + bool const defer = false; +#endif + if (false == memory_.free(p, defer)) + fail_test("incorrect address passed to free() function", test_result_double_free, info); + disable_alloc_ -= 1; + } + + virtual unpark_reason park_current_thread(bool is_timed, + bool allow_spurious_wakeup, + bool do_switch, + debug_info_param info) + { + RL_VERIFY(false == special_function_executing); + RL_VERIFY(threadx_->saved_disable_preemption_ == -1); + unsigned dp = disable_preemption_; + disable_preemption_ = 0; + RL_HIST_CTX(park_event) {is_timed, allow_spurious_wakeup} RL_HIST_END(); + if (false == sched_.park_current_thread(is_timed, allow_spurious_wakeup)) + { + fail_test("deadlock detected", test_result_deadlock, info); + } + schedule(1); + // otherwise it's restored in switch_back() + RL_VERIFY(threadx_->saved_disable_preemption_ == -1); + if (do_switch == false || threadx_->unpark_reason_ != unpark_reason_normal) + disable_preemption_ = dp; + else + threadx_->saved_disable_preemption_ = dp; + unpark_reason reason = threadx_->unpark_reason_; + return reason; + } + + virtual void unpark_thread(thread_id_t th, bool do_switch, debug_info_param info) + { + RL_VERIFY(false == special_function_executing); + RL_HIST_CTX(unpark_event) {th} RL_HIST_END(); + sched_.unpark_thread(th, do_switch); + if (do_switch) + { + threads_[th].unpark_reason_ = unpark_reason_normal; + threads_[th].temp_switch_from_ = threadx_->index_; + switch_to_fiber(th); + } + } + + virtual void switch_back(debug_info_param info) + { +//std::cout << "switching back from " << threadx_->index_ << " to " << threadx_->temp_switch_from_ << std::endl; + (void)info; + RL_VERIFY(threadx_->saved_disable_preemption_ != -1); + RL_VERIFY(threadx_->temp_switch_from_ != -1); + thread_id_t const tid = threadx_->temp_switch_from_; + threadx_->temp_switch_from_ = -1; + switch_to_fiber(tid); + RL_VERIFY(threadx_->saved_disable_preemption_ != -1); + disable_preemption_ = threadx_->saved_disable_preemption_; + threadx_->saved_disable_preemption_ = -1; + } + + void ensure(bool cond, char const* desc, test_result_e res, debug_info_param info) + { + if (false == cond) + fail_test(desc, res, info); + } + + virtual void fail_test(char const* desc, test_result_e res, debug_info_param info) + { + + RL_DEBUGBREAK_ON_FAILURE_IMPL; + + RL_VERIFY(test_result_success != res); + + test_result_ = res; + if (test_result_user_assert_failed == res && invariant_executing) 
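+    // fail_test() is the sink for user-visible assertion failures; a sketch of
+    // the user-facing side, assuming the framework's RL_ASSERT macro and an
+    // illustrative atomic x:
+    //
+    //   int v = x($).load(rl::mo_acquire);
+    //   RL_ASSERT(1 == v);  // failure lands here as test_result_user_assert_failed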
+ test_result_ = test_result_user_invariant_failed; + if (0 == desc || 0 == desc[0]) + test_result_str_ = test_result_str(test_result_); + else + test_result_str_ = string(test_result_str(test_result_)) + " (" + desc + ")"; + + RL_HIST_CTX(user_event) {test_result_str_.c_str()} RL_HIST_END(); + + switch_to_main_fiber(); + } + + virtual void rl_until(char const* desc, debug_info_param info) + { + RL_HIST_CTX(user_event) {desc} RL_HIST_END(); + test_result_ = test_result_until_condition_hit; + switch_to_main_fiber(); + } + + static void fiber_proc(void* thread_index); + + virtual void fiber_proc_impl(int thread_index) + { + thread_info_base* param = &threads_[thread_index]; + debug_info info = $; + for (;;) + { + if (first_thread_) + { + first_thread_ = false; + special_function_executing = true; + RL_HIST_CTX(user_event) {"[CTOR BEGIN]"} RL_HIST_END(); + construct_current_test_suite(); + RL_HIST_CTX(user_event) {"[CTOR END]"} RL_HIST_END(); + RL_HIST_CTX(user_event) {"[BEFORE BEGIN]"} RL_HIST_END(); + current_test_suite->before(); + RL_HIST_CTX(user_event) {"[BEFORE END]"} RL_HIST_END(); + rl_global_fence(); + invariant_executing = true; + current_test_suite->invariant(); + invariant_executing = false; + special_function_executing = false; + } + +//std::cout << "thread " << param->index_ << " started" << std::endl; + param->on_start(); + + if (param->index_ < static_thread_count) + { + current_test_suite->thread(param->index_); + } + else + { + if (param->dynamic_thread_func_) + param->dynamic_thread_func_(param->dynamic_thread_param_); + } + +//std::cout << "thread " << param->index_ << " finished" << std::endl; + RL_HIST_CTX(user_event) {"[THREAD FINISHED]"} RL_HIST_END(); + RL_VERIFY(disable_preemption_ == 0); + RL_VERIFY(threadx_->temp_switch_from_ == -1); + RL_VERIFY(threadx_->saved_disable_preemption_ == -1); + + param->on_finish(); + + thread_finish_result res = sched_.thread_finished(); +//std::cout << "thread " << param->index_ << " finished res=" << res << std::endl; + if (thread_finish_result_normal == res) + { + sched(); + } + else if (thread_finish_result_last == res) + { + special_function_executing = true; + invariant_executing = true; + current_test_suite->invariant(); + invariant_executing = false; + rl_global_fence(); + RL_HIST_CTX(user_event) {"[AFTER BEGIN]"} RL_HIST_END(); + current_test_suite->after(); + RL_HIST_CTX(user_event) {"[AFTER END]"} RL_HIST_END(); + RL_HIST_CTX(user_event) {"[DTOR BEGIN]"} RL_HIST_END(); + destroy_current_test_suite(); + RL_HIST_CTX(user_event) {"[DTOR END]"} RL_HIST_END(); + special_function_executing = false; + + ensure(memory_.iteration_end(), "memory leak detected", test_result_memory_leak, $); + ensure(atomic_alloc_->iteration_end(), "atomic leak", test_result_resource_leak, $); + ensure(var_alloc_->iteration_end(), "var leak", test_result_resource_leak, $); + ensure(mutex_alloc_->iteration_end(), "mutex leak", test_result_resource_leak, $); + ensure(condvar_alloc_->iteration_end(), "condition variable leak", test_result_resource_leak, $); + ensure(sema_alloc_->iteration_end(), "semaphore leak", test_result_resource_leak, $); + ensure(event_alloc_->iteration_end(), "event leak", test_result_resource_leak, $); + + switch_to_main_fiber(); + } + else if (thread_finish_result_deadlock == res) + { + fail_test("deadlock detected", test_result_deadlock, info); + } + else + { + RL_VERIFY(false); + } + } + } + + virtual win_waitable_object* create_thread(void*(*fn)(void*), void* ctx) + { + RL_VERIFY(fn); + thread_id_t id = 
sched_.create_thread(); + threads_[id].dynamic_thread_func_ = fn; + threads_[id].dynamic_thread_param_ = ctx; + threads_[id].sync_object_.on_create(); + return &threads_[id].sync_object_; + } + + virtual void yield(unsigned count, debug_info_param info) + { + RL_VERIFY(count); + RL_HIST_CTX(yield_event) {count} RL_HIST_END(); + if (sched_count_++ > params_.execution_depth_limit) + fail_test("livelock", test_result_livelock, RL_INFO); + schedule(count); + } + + virtual void sched() + { + if (sched_count_++ > params_.execution_depth_limit) + fail_test("livelock", test_result_livelock, RL_INFO); + if (disable_preemption_) + return; + schedule(0); + } + + void schedule(unsigned yield) + { + RL_VERIFY(threadx_->temp_switch_from_ == -1); + RL_VERIFY(disable_preemption_ == 0); + if (special_function_executing) + { + threadx_->unpark_reason_ = unpark_reason_normal; + return; + } + + special_function_executing = true; + invariant_executing = true; + current_test_suite->invariant(); + invariant_executing = false; + special_function_executing = false; + + if (yield) + threadx_->last_yield_ = threadi().own_acq_rel_order_; + + unpark_reason reason = unpark_reason_normal; + thread_id_t const th = sched_.schedule(reason, yield); + threads_[th].unpark_reason_ = reason; + + switch_to_fiber(th); + RL_VERIFY(0 == disable_preemption_); + } + + test_result_e simulate(std::ostream& ss, std::istream& sss, bool second) + { + if (EOF != sss.peek()) + { + sss >> start_iteration_; + sched_.set_state(sss); + } + + test_result_e const res = simulate2(second); + + if (test_result_success != res && false == params_.collect_history) + { + ss << params_.stop_iteration << " "; + sched_.get_state(ss); + } + + return res; + } + + test_result_e simulate2(bool second) + { + debug_info info = $; + + current_iter_ = start_iteration_; + for (; ; ++current_iter_) + { + rand_.seed(current_iter_); + + iteration(current_iter_); + + if (test_result_success != test_result_) + { + params_.test_result = test_result_; + params_.stop_iteration = current_iter_; + if (params_.collect_history) + output_history(); + return test_result_; + } + + // If you hit assert here, then probably your test is non-deterministic + // Check whether you are using functions like ::rand() + // or static variables or values of object addresses (for hashing) in your test + // Replace ::rand() with rl::rand(), eliminate static variables in the test + RL_VERIFY(second == false); + (void)second; + + RL_HIST_CTX(user_event) {"ITERATION END"} RL_HIST_END(); + + if (sched_.iteration_end()) + break; + } + + params_.test_result = test_result_success; + params_.stop_iteration = current_iter_; + return test_result_success; + } + + RL_INLINE static void reset_thread(thread_info<thread_count>& ti) + { + foreach<thread_count>( + ti.acquire_fence_order_, + &assign_zero); + foreach<thread_count>( + ti.release_fence_order_, + &assign_zero); + +#ifdef RL_IMPROVED_SEQ_CST_FENCE + foreach<thread_count>(ti.imp_seq_cst_order_, &assign_zero); +#endif + } + + void iteration(iteration_t iter) + { + first_thread_ = true; + disable_preemption_ = 0; + sched_count_ = 0; + + foreach<thread_count>( + threads_, + &context_impl::reset_thread); + + foreach<thread_count>( + seq_cst_fence_order_, + &assign_zero); + + base_t::iteration_begin(); + + for (thread_id_t i = 0; i != thread_count; ++i) + { + threads_[i].iteration_begin(); + } + + disable_alloc_ += 1; + thread_id_t const th = sched_.iteration_begin(iter); + disable_alloc_ -= 1; + switch_to_fiber(th); + + if (0 == iter % 
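+    // schedule() runs the suite's invariant() between execution steps, so a
+    // test can assert a property that must hold at every interleaving point.
+    // Sketch, assuming the framework's RL_ASSERT macro:
+    //
+    //   struct counter_test : rl::test_suite<counter_test, 2>
+    //   {
+    //       rl::atomic<int> count;
+    //       void invariant() { RL_ASSERT(count($).load(rl::mo_relaxed) >= 0); }
+    //       void thread(unsigned) { count($).fetch_add(1, rl::mo_relaxed); }
+    //   };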
progress_probe_period) + { + output_progress(iter); + } + } + +private: + void switch_to_fiber(thread_id_t th) + { + fiber_t& prev = threadx_ ? threadx_->fiber_ : main_fiber_; + threadx_ = &threads_[th]; + ::switch_to_fiber(threadx_->fiber_, prev); + } + + void switch_to_main_fiber() + { + fiber_t& prev = threadx_->fiber_; + threadx_ = 0; + ::switch_to_fiber(main_fiber_, prev); + } + + void output_progress(iteration_t iter) + { + iteration_t const total = sched_.iteration_count(); + + if (0 == iter % (progress_probe_period * 16)) + { + disable_alloc_ += 1; + *params_.progress_stream << iter * 100 / total << "% (" + << iter << "/" << total << ")" << std::endl; + disable_alloc_ -= 1; + } + } + + virtual unsigned rand(unsigned limit, sched_type t) + { + return sched_.rand(limit, t); + } + + void output_history() + { + if (false == params_.output_history) + { + *params_.output_stream << test_result_str_ << std::endl; + *params_.output_stream << "iteration: " << params_.stop_iteration << std::endl; + *params_.output_stream << std::endl; + } + history_.print_exec_history(params_.output_history); + +#ifndef RL_GC + if (test_result_memory_leak == test_result_) + { + memory_.output_allocs(*params_.output_stream); + } +#endif + + //!!! output other leaked resources + if (test_result_ == test_result_resource_leak + && atomic_alloc_->iteration_end() == false) + { + *params_.output_stream << "leaked atomics:" << std::endl; + atomic_alloc_->output_allocs(*params_.output_stream); + } + } + + void rl_global_fence() + { + timestamp_t max_acq_rel = 0; + for (thread_id_t i = 0; i != thread_count; ++i) + { + if (threads_[i].acq_rel_order_[i] > max_acq_rel) + max_acq_rel = threads_[i].acq_rel_order_[i]; + } + + for (thread_id_t i = 0; i != thread_count; ++i) + { + for (thread_id_t j = 0; j != thread_count; ++j) + { + threads_[i].acq_rel_order_[j] = max_acq_rel; + } + } + } + + virtual void atomic_thread_fence_acquire() + { + threadi().atomic_thread_fence_acquire(); + } + + virtual void atomic_thread_fence_release() + { + threadi().atomic_thread_fence_release(); + } + + virtual void atomic_thread_fence_acq_rel() + { + threadi().atomic_thread_fence_acq_rel(); + } + + virtual void atomic_thread_fence_seq_cst() + { + sched(); + threadi().atomic_thread_fence_seq_cst(seq_cst_fence_order_); + } + + virtual thread_id_t get_thread_count() const + { + return thread_count; + } + + virtual generic_mutex_data* mutex_ctor(bool is_rw, bool is_exclusive_recursive, bool is_shared_recursive, bool failing_try_lock) + { + return new (mutex_alloc_->alloc()) generic_mutex_data_impl<thread_count>(is_rw, is_exclusive_recursive, is_shared_recursive, failing_try_lock); + } + + virtual void mutex_dtor(generic_mutex_data* m) + { + generic_mutex_data_impl<thread_count>* mm = static_cast<generic_mutex_data_impl<thread_count>*>(m); + mm->~generic_mutex_data_impl<thread_count>(); + mutex_alloc_->free(mm); + } + + virtual condvar_data* condvar_ctor(bool allow_spurious_wakeups) + { + return new (condvar_alloc_->alloc()) condvar_data_impl<thread_count>(allow_spurious_wakeups); + } + + virtual void condvar_dtor(condvar_data* cv) + { + condvar_data_impl<thread_count>* mm = static_cast<condvar_data_impl<thread_count>*>(cv); + mm->~condvar_data_impl<thread_count>(); + condvar_alloc_->free(mm); + } + + virtual sema_data* sema_ctor(bool spurious_wakeups, unsigned initial_count, unsigned max_count) + { + return new (sema_alloc_->alloc()) sema_data_impl<thread_count>(spurious_wakeups, initial_count, max_count); + } + + virtual void sema_dtor(sema_data* 
cv) + { + sema_data_impl<thread_count>* mm = static_cast<sema_data_impl<thread_count>*>(cv); + mm->~sema_data_impl<thread_count>(); + sema_alloc_->free(mm); + } + + virtual event_data* event_ctor(bool manual_reset, bool initial_state) + { + return new (event_alloc_->alloc()) event_data_impl<thread_count>(manual_reset, initial_state); + } + + virtual void event_dtor(event_data* cv) + { + event_data_impl<thread_count>* mm = static_cast<event_data_impl<thread_count>*>(cv); + mm->~event_data_impl<thread_count>(); + event_alloc_->free(mm); + } + + context_impl(context_impl const&); + context_impl& operator = (context_impl const&); +}; + +/* +template<typename test_t, typename sched_t> +struct thread_params_t +{ + typedef context_impl<test_t, sched_t> context_t; + + //HANDLE handle; + context_t* ctx; + ostringstream oss; + istringstream* iss; + + //RL_NOCOPY(thread_params_t); +}; + + +template<typename test_t, typename sched_t> +unsigned __stdcall thread_func(void * ctx) +{ + typedef thread_params_t<test_t, sched_t> params_t; + params_t& p = *static_cast<params_t*>(ctx); + p.ctx->simulate(p.oss, *p.iss, false); + return 0; +} +*/ + +template<typename test_t, typename sched_t> +test_result_e run_test(test_params& params, std::ostream& oss, bool second) +{ + typedef context_impl<test_t, sched_t> context_t; + typedef typename sched_t::shared_context_t shared_context_t; + //typedef thread_params_t<test_t, sched_t> params_t; + + //bool destroy_persistent = false; + //context_persistent<test_t, sched_t>* persistent = 0; + //if (persistent_ptr == 0) + //{ + // persistent = new context_persistent<test_t, sched_t>; + // persistent_ptr = persistent; + //} + //else + //{ + // persistent = static_cast<context_persistent<test_t, sched_t>*>(persistent_ptr); + // destroy_persistent = true; + //} + + shared_context_t sctx; + test_result_e res; + + //if (second == false) + { + istringstream iss (params.initial_state); + res = context_t(params, sctx).simulate(oss, iss, second); + } + //else + //{ + // size_t const thread_count = 2; + // vector<params_t*>::type threads (thread_count); + // for (size_t i = 0; i != thread_count; i += 1) + // { + // threads[i] = new params_t; + // threads[i]->iss = new istringstream(params.initial_state); + // threads[i]->ctx = new context_t(params, sctx); + // threads[i]->handle = (HANDLE)(_beginthreadex)(0, 0, &thread_func<test_t, sched_t>, threads[i], 0, 0); + // } + + // for (size_t i = 0; i != thread_count; i += 1) + // { + // (WaitForSingleObject)(threads[i]->handle, (INFINITE)); + // } + + // for (size_t i = 0; i != thread_count; i += 1) + // { + // delete threads[i]->ctx; + // delete threads[i]->iss; + // delete threads[i]; + // } + + // return test_result_success; + //} + + //if (destroy_persistent) + //{ + // delete persistent; + // persistent_ptr = 0; + //} + + return res; +} + + +template<typename test_t> +bool simulate(test_params& params) +{ + char const* test_name = typeid(test_t).name(); + while (test_name[0] >= '0' && test_name[0] <= '9') + test_name += 1; + params.test_name = test_name; + *params.output_stream << params.test_name << std::endl; + + unsigned start_time = get_tick_count(); + + //void* persistent = 0; + + ostringstream oss; + //istringstream iss (params.initial_state); + test_result_e res = test_result_success; + if (random_scheduler_type == params.search_type) + res = run_test<test_t, random_scheduler<test_t::params::thread_count> >(params, oss, false); + else if (fair_full_search_scheduler_type == params.search_type) + res = run_test<test_t, 
full_search_scheduler<test_t::params::thread_count> >(params, oss, false); + else if (fair_context_bound_scheduler_type == params.search_type) + res = run_test<test_t, context_bound_scheduler<test_t::params::thread_count> >(params, oss, false); + else + RL_VERIFY(false); + + if (test_result_success == res) + { + unsigned t = get_tick_count() - start_time; + if (0 == t) + t = 1; + + *params.output_stream << "iterations: " << params.stop_iteration << std::endl; + *params.output_stream << "total time: " << t << std::endl; + *params.output_stream << "throughput: " << (uint64_t)params.stop_iteration * 1000 / t << std::endl; + *params.output_stream << std::endl; + } + else if (false == params.output_history && false == params.collect_history) + { + ostringstream oss2; + params.initial_state = oss.str(); + //istringstream iss2 (oss.str()); + params.collect_history = true; + params.final_state = oss.str(); + iteration_t const stop_iter = params.stop_iteration; + test_result_e res2 = test_result_success; + if (random_scheduler_type == params.search_type) + res2 = run_test<test_t, random_scheduler<test_t::params::thread_count> >(params, oss2, true); + else if (fair_full_search_scheduler_type == params.search_type) + res2 = run_test<test_t, full_search_scheduler<test_t::params::thread_count> >(params, oss2, true); + else if (fair_context_bound_scheduler_type == params.search_type) + res2 = run_test<test_t, context_bound_scheduler<test_t::params::thread_count> >(params, oss2, true); + else + RL_VERIFY(false); + + // If you hit assert here, then probably your test is non-deterministic + // Check whether you are using functions like ::rand() + // or static variables or values of object addresses (for hashing) in your test + // Replace ::rand() with rl::rand(), eliminate static variables in the test + RL_VERIFY(res == res2); + + RL_VERIFY(params.stop_iteration == stop_iter); + (void)stop_iter; + (void)res2; + } + return test_t::params::expected_result == res; +} + +template<typename test_t> +bool simulate() +{ + test_params params; + return simulate<test_t>(params); +} + +template<void(*func)(), size_t thread_count> +struct simulate_thunk : test_suite<simulate_thunk<func, thread_count>, 1> +{ + static size_t const dynamic_thread_count = thread_count; + void thread(unsigned) + { + func(); + } +}; + +template<void(*func)(), size_t thread_count> +bool execute(test_params& params) +{ + return simulate<simulate_thunk<func, thread_count> >(params); +} + +template<void(*func)(), size_t thread_count> +bool execute() +{ + return simulate<simulate_thunk<func, thread_count> >(); +} + +typedef bool (*simulate_f)(test_params&); + + +template<typename test_t, typename scheduler_t> +void context_impl<test_t, scheduler_t>::fiber_proc(void* thread_index) +{ + ctx().fiber_proc_impl((int)(intptr_t)thread_index); +} + +template<typename type> +void dtor_arr_impl(void* pp) +{ + type* p = (type*)((char*)pp + alignment); + size_t count = *(size_t*)pp; + for (size_t i = 0; i != count; ++i) + { + p->~type(); + p += 1; + } +} + +template<typename type> +type* new_arr_impl(size_t count, rl::debug_info_param info) +{ + RL_VERIFY(alignment >= sizeof(size_t)); + context& c = ctx(); +#ifndef RL_GC + void* mem = c.alloc(alignment + count * sizeof(type), true, info); +#else + void* mem = c.alloc(alignment + count * sizeof(type), true, &dtor_arr_impl<type>, info); +#endif + *(size_t*)mem = count; + size_t i = 0; + char* begin = (char*)mem + alignment; + char* pos = begin; + try + { + for (; i != count; ++i) + { + new (pos) type; + pos += 
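+// Driving the whole thing (ties simulate<>, test_params and execute<> above
+// together; iteration_count is assumed from test_params.hpp, not shown here,
+// and publish_test/my_func are illustrative):
+//
+//   rl::test_params params;
+//   params.search_type = rl::random_scheduler_type;
+//   params.iteration_count = 100000;  // bound for the random scheduler
+//   bool ok = rl::simulate<publish_test>(params);
+//
+//   // or, for a free function run on two dynamic threads:
+//   rl::execute<my_func, 2>();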
sizeof(type); + } + return (type*)begin; + } + catch (...) + { + pos -= sizeof(type); + i -= 1; + for (; i < count; --i) + { + ((type*)pos)->~type(); + pos -= sizeof(type); + } + ctx().free(mem, true, info); + throw; + } +} + +template<typename type> +void delete_arr_impl(type* p, debug_info_param info) +{ + if (p == 0) + return; + context& c = ctx(); + char* begin = (char*)p - alignment; + size_t count = *(size_t*)begin; + for (size_t i = 0; i != count; ++i) + { + p->~type(); + p += 1; + } + c.free(begin, true, info); +} + +template<typename type> +void delete_impl(type* p, debug_info_param info) +{ + p->~type(); + ctx().free(p, false, info); +} + +template<typename type> +void dtor_impl(void* p) +{ + static_cast<type*>(p)->~type(); +} + +inline unsigned rand(unsigned limit) +{ + return ctx().rand(limit, sched_type_user); +} + +inline unsigned thread_index() +{ + return ctx().threadx_->index_; +} + + +struct new_proxy +{ + debug_info info; + new_proxy(debug_info_param info) + : info(info) + { + //printf(__FUNCSIG__ "\n"); + } + + template<typename T> + T* operator % (T* p) + { + context& c = ctx(); + size_t sz = c.prev_alloc_size(); + if (sz) + { + RL_HIST(memory_alloc_event) {p, sz, false} RL_HIST_END(); + } + return p; + } +}; + +struct delete_proxy +{ + //debug_info info_; + delete_proxy(debug_info_param info) + //: info_(info) + { + ctx().set_debug_info(info); + //printf(__FUNCSIG__ "\n"); + } +}; + +inline void* rl_malloc(size_t sz, debug_info_param info) +{ + return ctx().alloc(sz, false, info); +} + +inline void* rl_calloc(size_t sz, size_t cnt, debug_info_param info) +{ + void* p = ctx().alloc(sz * cnt, false, info); + memset(p, 0, sz * cnt); + return p; +} + +inline void* realloc(void* p, size_t sz, debug_info_param info) +{ + if (sz == 0) + { + ctx().free(p, false, info); + return 0; + } + else + { + void* pp = ctx().alloc(sz, false, info); + memcpy(pp, p, sz); //!!! how much memory to move? 
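+// The instrumented allocators above feed the leak detector: anything taken
+// through them must be freed before the iteration ends, or the run fails
+// with test_result_memory_leak. Sketch:
+//
+//   void* p = rl::rl_malloc(64, $);
+//   // ... use p from the test threads ...
+//   rl::rl_free(p, $);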
+ ctx().free(p, false, info); + return pp; + } +} + +inline void rl_free(void* p, debug_info_param info) +{ + ctx().free(p, false, info); +} + +inline size_t hash_ptr(void const* p, size_t size) +{ + return ctx().get_addr_hash(p) % size; +} + +inline void systemwide_fence(debug_info_param info) +{ + context& c = ctx(); + RL_HIST(user_msg_event) {"system-wide fence"} RL_HIST_END(); + c.rl_global_fence(); +} + +} // namespace rl + + +#ifndef RL_GC +inline void* operator new (size_t size, rl::debug_info_param info) +{ + return rl::ctx().alloc(size, false, info); +} + +inline void* operator new [] (size_t size, rl::debug_info_param info) +{ + return rl::ctx().alloc(size, false, info); +} + +inline void operator delete (void* p, rl::debug_info_param info) +{ + rl::ctx().free(p, false, info); +} + +inline void operator delete [] (void* p, rl::debug_info_param info) +{ + rl::ctx().free(p, false, info); +} +#endif + + + +#ifdef RL_GC +inline void* operator new (size_t size, void(*dtor)(void*), rl::debug_info_param info) +{ + return rl::ctx().alloc(size, false, dtor, info); +} + +inline void operator delete (void* p, void(*dtor)(void*), rl::debug_info_param info) +{ + (void)p; + (void)dtor; + (void)info; +} +#endif + +inline void* operator new (size_t size) throw(std::bad_alloc) +{ + if (&rl::ctx()) + return rl::ctx().alloc(size); + else + return (::malloc)(size); +} + +inline void* operator new [] (size_t size) throw(std::bad_alloc) +{ + if (&rl::ctx()) + return rl::ctx().alloc(size); + else + return (::malloc)(size); +} + +inline void operator delete (void* p) throw() +{ + if (&rl::ctx()) + rl::ctx().free(p); + else + (::free)(p); +} + +inline void operator delete [] (void* p) throw() +{ + if (&rl::ctx()) + rl::ctx().free(p); + else + (::free)(p); +} + +#define RL_NEW_PROXY rl::new_proxy($) % new +#define RL_DELETE_PROXY rl::delete_proxy($) , delete + +#endif diff --git a/libs/relacy/relacy/context_addr_hash.hpp b/libs/relacy/relacy/context_addr_hash.hpp @@ -0,0 +1,81 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. + */ + +#ifndef RL_CONTEXT_ADDR_HASH_HPP +#define RL_CONTEXT_ADDR_HASH_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "base.hpp" + + +namespace rl +{ + + +struct context_addr_hash_iface +{ + virtual size_t get_addr_hash (void const* p) = 0; + virtual ~context_addr_hash_iface () {} // to calm down g++ +}; + + + + +template<typename base_t, thread_id_t thread_count> +class context_addr_hash_impl : protected base_t +{ +public: + context_addr_hash_impl(thread_id_t thread_count_param, test_params& params) + : base_t(thread_count_param, params) + { + } + + void iteration_begin() + { + base_t::iteration_begin(); + hash_map_.clear(); + hash_seq_ = 0; + } + +private: + struct entry + { + uintptr_t ptr_; + size_t hash_; + }; + typedef map<void const*, size_t>::type hash_map_t; + hash_map_t hash_map_; + size_t hash_seq_; + + virtual size_t get_addr_hash (void const* p) + { + //!!! accept 'table size' to do 'hash % table_size' + // will give more information for state exploration + + hash_map_t::iterator iter (hash_map_.find(p)); + if (iter != hash_map_.end() && iter->first == p) + { + return iter->second; + } + else + { + //!!! 
distribute hashes more randomly, use rand() + size_t hash = hash_seq_++; + hash_map_.insert(std::make_pair(p, hash)); + return hash; + } + } +}; + + +} + +#endif diff --git a/libs/relacy/relacy/context_base.hpp b/libs/relacy/relacy/context_base.hpp @@ -0,0 +1,322 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. + */ + +#ifndef RL_CONTEXT_BASE_HPP +#define RL_CONTEXT_BASE_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "base.hpp" +#include "history.hpp" +#include "memory.hpp" +#include "test_result.hpp" +#include "slab_allocator.hpp" +#include "test_params.hpp" +#include "random.hpp" +#include "foreach.hpp" +#include "thread_base.hpp" +#include "context_addr_hash.hpp" + + +#ifdef RL_DEBUGBREAK_ON_ASSERT +# ifdef _MSC_VER +# define RL_DEBUGBREAK_ON_ASSERT_IMPL {if (IsDebuggerPresent()) __debugbreak();} +# else +# define RL_DEBUGBREAK_ON_ASSERT_IMPL {__asm("int3");} +# endif +#else +# define RL_DEBUGBREAK_ON_ASSERT_IMPL +#endif + +#ifdef RL_DEBUGBREAK_ON_FAILURE +# ifdef _MSC_VER +# define RL_DEBUGBREAK_ON_FAILURE_IMPL {if (IsDebuggerPresent()) __debugbreak();} +# else +# define RL_DEBUGBREAK_ON_FAILURE_IMPL {__asm("int3");} +# endif +#else +# define RL_DEBUGBREAK_ON_FAILURE_IMPL +#endif + + + +namespace rl +{ + +class thread_info_base; + +struct atomic_data {}; +struct var_data +{ + virtual void init(thread_info_base& th) = 0; + virtual bool store(thread_info_base& th) = 0; + virtual bool load(thread_info_base& th) = 0; + virtual ~var_data() {} // just to calm down gcc +}; + +struct generic_mutex_data; +struct condvar_data; +struct sema_data; +struct event_data; + + +struct user_msg_event +{ + string msg_; + + void output(std::ostream& s) const + { + s << msg_; + } +}; + +class context; + +template<int fake = 0> +struct context_holder +{ + static context* instance_; + + static long volatile ctx_seq; +}; + +template<int fake> +long volatile context_holder<fake>::ctx_seq = 0; + +class context + : public thread_local_context_iface + , public context_addr_hash_iface + , nocopy<> +{ +public: + static context& instance() + { + //!!! 
disabled for check in operator new RL_VERIFY(context_holder<>::instance_); + return *context_holder<>::instance_; + } + + virtual atomic_data* atomic_ctor(void* ctx) = 0; + virtual void atomic_dtor(atomic_data* data) = 0; + + virtual var_data* var_ctor() = 0; + virtual void var_dtor(var_data* data) = 0; + + virtual generic_mutex_data* mutex_ctor(bool is_rw, bool is_exclusive_recursive, bool is_shared_recursive, bool failing_try_lock) = 0; + virtual void mutex_dtor(generic_mutex_data* m) = 0; + + virtual condvar_data* condvar_ctor(bool allow_spurious_wakeups) = 0; + virtual void condvar_dtor(condvar_data* cv) = 0; + + virtual sema_data* sema_ctor(bool spurious_wakeups, unsigned initial_count, unsigned max_count) = 0; + virtual void sema_dtor(sema_data* cv) = 0; + + virtual event_data* event_ctor(bool manual_reset, bool initial_state) = 0; + virtual void event_dtor(event_data* cv) = 0; + + virtual void rl_global_fence() = 0; + virtual void sched() = 0; + virtual void yield(unsigned count, debug_info_param info) = 0; + virtual void fail_test(char const* desc, test_result_e res, debug_info_param info) = 0; + virtual void rl_until(char const* desc, debug_info_param info) = 0; + + virtual void* alloc(size_t size, bool is_array, debug_info_param info) = 0; +#ifdef RL_GC + virtual void* alloc(size_t size, bool is_array, void(*dtor)(void*), debug_info_param info) = 0; +#endif + virtual void free(void* p, bool is_array, debug_info_param info) = 0; + + virtual void* alloc(size_t size) = 0; + virtual void free(void* p) = 0; + virtual size_t prev_alloc_size() = 0; + virtual void set_debug_info(debug_info_param info) = 0; + + virtual void fiber_proc_impl(int thread_index) = 0; + + virtual unpark_reason park_current_thread(bool is_timed, + bool allow_spurious_wakeup, + bool do_switch, + debug_info_param info) = 0; + virtual void unpark_thread(thread_id_t th, bool do_switch, debug_info_param info) = 0; + virtual void switch_back(debug_info_param info) = 0; + + virtual void atomic_thread_fence_acquire() = 0; + virtual void atomic_thread_fence_release() = 0; + virtual void atomic_thread_fence_acq_rel() = 0; + virtual void atomic_thread_fence_seq_cst() = 0; + + virtual unsigned rand(unsigned limit, sched_type t) = 0; + + virtual win_waitable_object* create_thread(void*(*fn)(void*), void* ctx) = 0; + + virtual unpark_reason wfmo_park(void** ws, + win_waitable_object** wo, + size_t count, + bool wait_all, + bool is_timed, + debug_info_param info) = 0; + + int get_errno(); + void set_errno(int value); + + thread_info_base* threadx_; + timestamp_t* seq_cst_fence_order_; + + bool invariant_executing; + + RL_INLINE bool collecting_history() const + { + return params_.collect_history && false == invariant_executing; + } + + template<typename event_t> + void exec_log(debug_info_param info, event_t const& ev); + + void exec_log_msg(debug_info_param info, char const* msg) + { + user_msg_event ev = {msg}; + exec_log(info, ev); + } + + bool is_random_sched() const + { + return is_random_sched_; + } + + unsigned get_ctx_seq() const + { + return ctx_seq_; + } + + void disable_preemption(); + void enable_preemption(); + + virtual thread_id_t get_thread_count() const = 0; + + thread_id_t current_thread() const + { + return threadx_->index_; + } + + void iteration_begin() + { + } + +protected: + history_mgr history_; + test_params& params_; + unsigned disable_preemption_; + int disable_alloc_; + + context(thread_id_t thread_count, test_params& params) + : history_(*params.output_stream, thread_count) + , params_(params) + , 
disable_alloc_(1) + { + RL_VERIFY(0 == context_holder<>::instance_); + context_holder<>::instance_ = this; + + is_random_sched_ = params_.search_type == random_scheduler_type; + +#ifdef _MSC_VER + ctx_seq_ = _InterlockedExchangeAdd(&context_holder<>::ctx_seq, 1) + 1; +#else + ctx_seq_ = __sync_fetch_and_add(&context_holder<>::ctx_seq, 1) + 1; +#endif + } + + virtual ~context() + { + RL_VERIFY(this == context_holder<>::instance_); + context_holder<>::instance_ = 0; + } + +private: + bool is_random_sched_; + unsigned ctx_seq_; +}; + + +template<int fake> +context* context_holder<fake>::instance_ = 0; + + + + +inline context& ctx() +{ + return context::instance(); +} + + +inline int get_errno() +{ + return ctx().get_errno(); +} + +inline void set_errno(int value) +{ + return ctx().set_errno(value); +} + +class preemption_disabler : nocopy<> +{ +public: + preemption_disabler(context& c) + : c_(c) + { + c_.disable_preemption(); + } + + ~preemption_disabler() + { + c_.enable_preemption(); + } + +private: + context& c_; +}; + + +} + + +#define RL_HIST_IMPL(C, INFO, TYPE) \ + do { \ + if (C.collecting_history()) { \ + rl::debug_info const& rl_info_c = INFO; \ + rl::context& rl_hist_c = C; \ + TYPE ev = \ +/**/ + +#define RL_HIST_END() \ + ; \ + rl_hist_c.exec_log(rl_info_c, ev); \ + } \ + } while ((void)0, 0) \ +/**/ + +#define RL_HIST_CTX(TYPE) RL_HIST_IMPL((*this), info, TYPE) + +#define RL_HIST(TYPE) RL_HIST_IMPL(c, info, TYPE) + +#define RL_LOG(desc) rl::ctx().exec_log_msg(RL_INFO, desc) + + + +#ifdef _MSC_VER +# define RL_ASSERT_IMPL(x, res, str, info) do {if (!((void)0, (x))) {{RL_DEBUGBREAK_ON_ASSERT_IMPL} rl::ctx().fail_test(str, res, info);}} while ((void)0, 0) +#else +# define RL_ASSERT_IMPL(x, res, str, info) do {if (!((void)0, (x))) rl::ctx().fail_test(str, res, info);} while ((void)0, 0) +#endif +#define RL_ASSERT(x) RL_ASSERT_IMPL(x, rl::test_result_user_assert_failed, "assertion: " #x, RL_INFO) +#define RL_UNTIL(x) do {if ((x)) rl::ctx().rl_until(#x, RL_INFO);} while ((void)0, 0) + + +#endif diff --git a/libs/relacy/relacy/context_base_impl.hpp b/libs/relacy/relacy/context_base_impl.hpp @@ -0,0 +1,72 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. + */ + +#ifndef RL_CONTEXT_BASE_IMPL_HPP +#define RL_CONTEXT_BASE_IMPL_HPP +#ifdef _MSC_VER +# pragma once +#endif + + +namespace rl +{ + +/* +inline void context::disable_history() +{ + RL_VERIFY(threadx_); + threadx_->disable_history_ += 1; +} + +inline void context::enable_history() +{ + RL_VERIFY(threadx_); + RL_VERIFY(threadx_->disable_history_); + threadx_->disable_history_ -= 1; +} +*/ + +inline void context::disable_preemption() +{ + disable_preemption_ += 1; +} + +inline void context::enable_preemption() +{ + disable_preemption_ -= 1; +} + +inline int context::get_errno() +{ + RL_VERIFY(threadx_); + return threadx_->errno_; +} + +inline void context::set_errno(int value) +{ + RL_VERIFY(threadx_); + threadx_->errno_ = value; +} + +template<typename event_t> +void context::exec_log(debug_info_param info, event_t const& ev) +{ + RL_VERIFY(collecting_history()); + disable_alloc_ += 1; + history_.exec_log(threadx_ ? 
threadx_->index_ : -1, info, ev, params_.output_history); + disable_alloc_ -= 1; +} + + + +} + + + +#endif diff --git a/libs/relacy/relacy/context_bound_scheduler.hpp b/libs/relacy/relacy/context_bound_scheduler.hpp @@ -0,0 +1,168 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. + */ + +#ifndef RL_CONTEXT_BOUND_SCHEDULER_HPP +#define RL_CONTEXT_BOUND_SCHEDULER_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "base.hpp" +#include "full_search_scheduler.hpp" +#include "foreach.hpp" + + +namespace rl +{ + + +template<thread_id_t thread_count> +struct context_bound_scheduler_thread_info : tree_search_scheduler_thread_info<thread_count> +{ + unsigned sched_count_; + unsigned forced_context_switch_count_; + + void reset(test_params& params) + { + tree_search_scheduler_thread_info<thread_count>::reset(params); + sched_count_ = 0; + forced_context_switch_count_ = 0; + } +}; + + + + +template<thread_id_t thread_count> +class context_bound_scheduler + : public tree_search_scheduler<context_bound_scheduler<thread_count> + , context_bound_scheduler_thread_info<thread_count>, thread_count> +{ +public: + typedef tree_search_scheduler<context_bound_scheduler<thread_count> + , context_bound_scheduler_thread_info<thread_count>, thread_count> base_t; + typedef typename base_t::thread_info_t thread_info_t; + typedef typename base_t::shared_context_t shared_context_t; + + context_bound_scheduler(test_params& params, shared_context_t& ctx, thread_id_t dynamic_thread_count) + : base_t(params, ctx, dynamic_thread_count) + { + } + + thread_id_t iteration_begin_impl() + { + switches_remain_ = this->params_.context_bound; + return base_t::iteration_begin_impl(); + } + + bool can_switch(thread_info_t& t) + { + t.sched_count_ += 1; + return switches_remain_ != 0; + } + + void on_switch(thread_info_t& t) + { + if (t.state_ == thread_state_running) + { + RL_VERIFY(switches_remain_); + switches_remain_ -= 1; + } + else + { + t.forced_context_switch_count_ += 1; + } + } + + double iteration_count_approx() + { + return 1.0; + /* + iteration_t const P = thread_count; + iteration_t const C0 = this->params_.context_bound; + iteration_t total = 1;//factorial(P);// * power(P, P * C0); + for (iteration_t i = 0; i != P - 1; ++i) + total *= power(i + 1, C0 + 1); + //if (C0) + // total *= power(P - 1, P - 1); + if (val(P) > 1) + { + for (iteration_t i = 0; i != P; ++i) + { + iteration_t const N = this->threads_[i].sched_count_; + iteration_t const C = C0 + this->threads_[i].forced_context_switch_count_; + //total *= (iteration_t)pow((double)(threads_[i].sched_count_ + 2) * (thread_count - 1), (int)(params_.context_bound + threads_[i].forced_context_switch_count_)); + total *= factorial(N, C) / factorial(C); + //C$ += C + 1; + //total *= (int)(params_.context_bound + threads_[i].forced_context_switch_count_)); + } + //total *= factorial(C$); + } + else + { + total = 1; + } + //iteration_t total = (iteration_t)pow((double)sched_count / thread_count + 1, (int)(params_.context_bound * thread_count + forced_context_switch_mean_ + 0.5)); + //total *= thread_count; + //total *= (iteration_t)pow((double)thread_count - 1, thread_count); + for (size_t i = 0; i != this->stree_.size(); 
++i) + { + if (this->stree_[i].type_ != sched_type_sched) + { + total *= this->stree_[i].count_; + } + } + return (double)total; + */ + } + +private: + unsigned switches_remain_; + + template<typename T> + static T factorial(T x, T i) + { + if (0 == i) + return 1; + T r = x; + for (--i; i; --i) + r *= x - i; + return r; + } + + template<typename T> + static T factorial(T x) + { + if (0 == x) + return 1; + T r = x; + for (T i = x - 1; i; --i) + r *= i; + return r; + } + + template<typename T> + static T power(T x, T y) + { + if (0 == y) + return 1; + T r = x; + for (T i = y - 1; i; --i) + r *= x; + return r; + } + + RL_NOCOPY(context_bound_scheduler); +}; + + +} + +#endif + diff --git a/libs/relacy/relacy/defs.hpp b/libs/relacy/relacy/defs.hpp @@ -0,0 +1,144 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. + */ + +#ifndef RL_DEFS_HPP +#define RL_DEFS_HPP +#ifdef _MSC_VER +# pragma once +#endif + + +namespace rl +{ + +typedef int thread_id_t; +typedef size_t timestamp_t; +typedef uint64_t iteration_t; + +size_t const atomic_history_size = 3; +iteration_t const progress_probe_period = 4 * 1024; + +size_t const alignment = 16; + +class context; +class thread_base; +struct win_waitable_object; + +enum sched_type +{ + sched_type_sched, + sched_type_atomic_load, + sched_type_cas_fail, + sched_type_mem_realloc, + sched_type_user, +}; + +enum unpark_reason +{ + unpark_reason_normal, + unpark_reason_timeout, + unpark_reason_spurious, +}; + +struct debug_info +{ + char const* func_; + char const* file_; + unsigned line_; + + debug_info(char const* func = "", char const* file = "", unsigned line = 0) + : func_(func) + , file_(file) + , line_(line) + { + } +}; + +typedef debug_info const& debug_info_param; + +inline void assert_failed(char const* cond, debug_info_param info) +{ + std::cout << "RELACY INTERNAL ASSERT FAILED: '" << cond + << "' at " << info.file_ << ":" << info.line_ << " (" << info.func_ << ")" << std::endl; +} + +template<typename T> +struct raw_allocator : std::allocator<T> +{ + template<class Y> + struct rebind + { + typedef raw_allocator<Y> other; + }; + + template<typename Y> + raw_allocator(raw_allocator<Y> const&) + { + } + + raw_allocator(raw_allocator const& rhs) + : std::allocator<T>(rhs) + { + } + + raw_allocator() + : std::allocator<T>() + { + } + + T* allocate(size_t count, void* = 0) + { + return (T*)(::malloc)(count * sizeof(T)); + } + + void deallocate(T* p, size_t) + { + (::free)(p); + } +}; + + +template<typename T> +struct vector +{ + typedef std::vector<T, raw_allocator<T> > type; +}; + +template<typename T> +struct queue +{ + typedef std::queue<T, std::deque<T, raw_allocator<T> > > type; +}; + +template<typename T> +struct stack +{ + typedef std::stack<T, std::vector<T, raw_allocator<T> > > type; +}; + +template<typename T> +struct set +{ + typedef std::set<T, std::less<T>, raw_allocator<T> > type; +}; + +template<typename T, typename Y> +struct map +{ + typedef std::map<T, Y, std::less<T>, raw_allocator<std::pair<T, Y> > > type; +}; + +typedef std::basic_string<char, std::char_traits<char>, raw_allocator<char> > string; +typedef std::basic_ostringstream<char, std::char_traits<char>, raw_allocator<char> > ostringstream; 
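+
+// note: the aliases above route Relacy's internal bookkeeping through
+// raw_allocator (plain malloc/free), presumably so the tool's own containers
+// never re-enter the instrumented global operator new/delete defined in
+// context.hpp; e.g. history.hpp declares its log as
+//   vector<history_entry>::type exec_history_;
+// rather than a std::vector with the default allocator.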
+typedef std::basic_istringstream<char, std::char_traits<char>, raw_allocator<char> > istringstream; + +} + + +#endif diff --git a/libs/relacy/relacy/dyn_thread.hpp b/libs/relacy/relacy/dyn_thread.hpp @@ -0,0 +1,53 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. + */ + +#ifndef RL_DYN_THREAD_HPP +#define RL_DYN_THREAD_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "base.hpp" +#include "context_base.hpp" +#include "stdlib/semaphore.hpp" + + +namespace rl +{ + + +class dyn_thread : nocopy<> +{ +public: + dyn_thread() + { + handle_ = 0; + } + + void start(void*(*fn)(void*), void* arg) + { + RL_VERIFY(handle_ == 0); + handle_ = ctx().create_thread(fn, arg); + } + + void join() + { + RL_VERIFY(handle_); + handle_->wait(false, false, $); + handle_ = 0; + } + +private: + win_waitable_object* handle_; +}; + + +} + +#endif diff --git a/libs/relacy/relacy/dyn_thread_ctx.hpp b/libs/relacy/relacy/dyn_thread_ctx.hpp @@ -0,0 +1,127 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. + */ + +#ifndef RL_DYN_THREAD_CTX_HPP +#define RL_DYN_THREAD_CTX_HPP +#ifdef _MSC_VER +# pragma once +#endif + + +#include "base.hpp" +#include "waitset.hpp" +#include "sync_var.hpp" +#include "stdlib/semaphore.hpp" + + +namespace rl +{ + + +template<thread_id_t thread_count> +class thread_sync_object : public win_waitable_object +{ +public: + thread_sync_object() + { + } + + void iteration_begin() + { + finished_ = false; + sync_.iteration_begin(); + RL_VERIFY(!ws_); + } + + void on_create() + { + sync_.release(ctx().threadx_); + } + + void on_start() + { + RL_VERIFY(finished_ == false); + context& c = ctx(); + sync_.acquire(c.threadx_); + } + + void on_finish() + { + RL_VERIFY(finished_ == false); + context& c = ctx(); + finished_ = true; + sync_.release(c.threadx_); + ws_.unpark_all(c, $); + } + +private: + bool finished_; + waitset<thread_count> ws_; + sync_var<thread_count> sync_; + + virtual void deinit(debug_info_param info) + { + (void)info; + } + + virtual sema_wakeup_reason wait(bool try_wait, bool is_timed, debug_info_param info) + { + context& c = ctx(); + if (finished_) + { + sync_.acquire(c.threadx_); + return sema_wakeup_reason_success; + } + else if (try_wait) + { + sync_.acquire(c.threadx_); + return sema_wakeup_reason_failed; + } + else + { + unpark_reason reason = ws_.park_current(c, is_timed, false, false, info); + sync_.acquire(c.threadx_); + if (reason == unpark_reason_normal) + return sema_wakeup_reason_success; + else if (reason == unpark_reason_timeout) + return sema_wakeup_reason_timeout; + RL_VERIFY(false); + return sema_wakeup_reason_failed; + } + } + + virtual bool signal(debug_info_param info) + { + RL_ASSERT_IMPL(false, test_result_thread_signal, "trying to signal a thread", info); + return false; + } + + virtual bool is_signaled(debug_info_param info) + { + (void)info; + return finished_; + } + 
+ virtual void memory_acquire(debug_info_param info) + { + (void)info; + sync_.acquire(ctx().threadx_); + } + + virtual void* prepare_wait(debug_info_param info) + { + (void)info; + return &ws_; + } +}; + + +} + +#endif diff --git a/libs/relacy/relacy/foreach.hpp b/libs/relacy/relacy/foreach.hpp @@ -0,0 +1,133 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. + */ + +#ifndef RL_FOREACH_HPP +#define RL_FOREACH_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "base.hpp" + +namespace rl +{ + + +template<typename T, thread_id_t i, thread_id_t index> +struct foreach_thread_impl +{ + template<typename F> + RL_INLINE static void exec( + T* v1, + F func) + { + (*func)(v1[i]); + foreach_thread_impl<T, i + 1, index - 1>::exec(v1, func); + } + + RL_INLINE static void exec( + T* v1, T* v2, + void (*func)(T& e1, T& e2)) + { + (*func)(v1[i], v2[i]); + foreach_thread_impl<T, i + 1, index - 1>::exec(v1, v2, func); + } + + RL_INLINE static void exec( + T* v1, T* v2, T* v3, + void (*func)(T& e1, T& e2, T& e3)) + { + (*func)(v1[i], v2[i], v3[i]); + foreach_thread_impl<T, i + 1, index - 1>::exec(v1, v2, v3, func); + } +}; + +template<typename T, thread_id_t i> +struct foreach_thread_impl<T, i, 0> +{ + template<typename F> + RL_INLINE static void exec( + T*, + F) + { + } + + RL_INLINE static void exec( + T*, T*, + void (*)(T&, T&)) + { + } + + RL_INLINE static void exec( + T*, T*, T*, + void (*)(T&, T&, T&)) + { + } +}; + +template<thread_id_t count, typename T, typename F> +RL_INLINE void foreach( + T* v1, + F func) +{ + foreach_thread_impl<T, 0, count>::exec(v1, func); +} + +template<thread_id_t count, typename T> +RL_INLINE void foreach( + T* v1, T* v2, + void (*func)(T& e1, T& e2)) +{ + foreach_thread_impl<T, 0, count>::exec(v1, v2, func); +} + +template<thread_id_t count, typename T> +RL_INLINE void foreach( + T* v1, T* v2, T* v3, + void (*func)(T& e1, T& e2, T& e3)) +{ + foreach_thread_impl<T, 0, count>::exec(v1, v2, v3, func); +} + +RL_INLINE void assign_zero(timestamp_t& elem) +{ + elem = 0; +} + +RL_INLINE void assign_zero_u(unsigned& elem) +{ + elem = 0; +} + +template<timestamp_t value> +RL_INLINE void assign(timestamp_t& elem) +{ + elem = value; +} + +RL_INLINE void assign(timestamp_t& elem1, timestamp_t& elem2) +{ + elem1 = elem2; +} + +RL_INLINE void assign_max(timestamp_t& elem1, timestamp_t& elem2) +{ + if (elem2 > elem1) + elem1 = elem2; +} + +RL_INLINE void plus_one(timestamp_t& elem) +{ + elem += 1; +} + +} + + +#endif diff --git a/libs/relacy/relacy/full_search_scheduler.hpp b/libs/relacy/relacy/full_search_scheduler.hpp @@ -0,0 +1,421 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. 
+ */ + +#ifndef RL_FULL_SEARCH_SCHEDULER_HPP +#define RL_FULL_SEARCH_SCHEDULER_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "base.hpp" +#include "scheduler.hpp" +#include "foreach.hpp" + + +namespace rl +{ + + +template<thread_id_t thread_count> +struct tree_search_scheduler_thread_info : scheduler_thread_info +{ + unsigned yield_sched_count_ [thread_count]; + unsigned yield_priority_ [thread_count]; + unsigned total_yield_priority_; + //unsigned subsequent_timed_waits_; + + void reset(test_params& params) + { + scheduler_thread_info::reset(params); + foreach<thread_count>(yield_sched_count_, &assign_zero_u); + foreach<thread_count>(yield_priority_, &assign_zero_u); + total_yield_priority_ = 0; + //subsequent_timed_waits_ = 0; + } +}; + + + + +template<typename derived_t, typename thread_info_type, thread_id_t thread_count> +class tree_search_scheduler + : public scheduler<derived_t, thread_info_type, thread_count> +{ +public: + typedef scheduler<derived_t, thread_info_type, thread_count> base_t; + typedef typename base_t::thread_info_t thread_info_t; + typedef typename base_t::shared_context_t shared_context_t; + + struct task_t + { + }; + + tree_search_scheduler(test_params& params, shared_context_t& ctx, thread_id_t dynamic_thread_count) + : base_t(params, ctx, dynamic_thread_count) + , stree_depth_() + , iteration_count_mean_() + , iteration_count_probe_count_() + { + stree_.reserve(128); + } + + thread_id_t iteration_begin_impl() + { + stree_depth_ = 0; + + unsigned const index = rand_impl(this->running_threads_count, sched_type_sched); + thread_id_t const th = this->running_threads[index]; + return th; + } + + bool iteration_end_impl() + { + RL_VERIFY(stree_depth_ == stree_.size()); + + for (size_t i = stree_.size(); i != 0; --i) + { + stree_node& n = stree_[i - 1]; + if (n.index_ != n.count_ - 1) + { + stree_.resize(i); + n.index_ += 1; + RL_VERIFY(n.index_ < n.count_); + return false; + } + } + return true; + } + + void yield_priority(unsigned yield) + { + RL_VERIFY(yield); + + thread_info_t& t = *this->thread_; + thread_id_t const& running_thread_count = this->running_threads_count; + + for (thread_id_t i = 0; i != thread_count; ++i) + { + thread_info_t& y = this->threads_[i]; + RL_VERIFY(0 == y.yield_priority_[t.index_]); + + if (t.index_ != i + && y.yield_sched_count_[t.index_] < yield + && y.state_ != thread_state_finished) + { + y.yield_priority_[t.index_] = yield; + y.total_yield_priority_ += yield; + this->block_thread(t.index_, false); + } + y.yield_sched_count_[t.index_] = 0; + } + + if (0 == running_thread_count) + purge_blocked_threads(); + } + + thread_id_t schedule_impl(unpark_reason& reason, unsigned yield) + { + thread_info_t& t = *this->thread_; + thread_id_t const& running_thread_count = this->running_threads_count; + +#ifdef _DEBUG + { + unsigned tmp = 0; + for (thread_id_t i = 0; i != thread_count; ++i) + tmp += t.yield_priority_[i]; + RL_VERIFY(t.total_yield_priority_ == tmp); + } +#endif + + if (t.total_yield_priority_) + { + for (thread_id_t i = 0; i != thread_count; ++i) + { + unsigned& prio = t.yield_priority_[i]; + if (prio) + { + prio -= 1; + t.total_yield_priority_ -= 1; + if (0 == prio) + { + this->unblock_thread(i); + } + } + t.yield_sched_count_[i] += 1; + } + } + + if (yield) + yield_priority(yield); + + reason = unpark_reason_normal; + thread_id_t thread_index = 0; + + if (self().can_switch(t) + || t.state_ != thread_state_running) + { + thread_id_t timed_thread_count = this->timed_thread_count_; + if (timed_thread_count) + { + thread_id_t 
cnt; + if (running_thread_count) + cnt = timed_thread_count + 1; + else + //!!! spurious thread will be never unblocked in such case - bad + cnt = timed_thread_count; + thread_id_t idx = this->rand(cnt, sched_type_user); + if (idx < timed_thread_count) + { + thread_info_t* thr = this->timed_threads_[idx]; + thread_index = thr->index_; + //??? suboptimal state space exploration + // if (1 != thr->block_count_) then we are making + // superfluous rand() + if (1 == thr->block_count_) + { + this->unpark_thread(thread_index); + RL_VERIFY(thr->state_ == thread_state_running); + reason = unpark_reason_timeout; + } + } + } + + RL_VERIFY(running_thread_count); + + if (unpark_reason_normal == reason) + { + thread_id_t spurious_thread_count = this->spurious_thread_count_; + if (spurious_thread_count) + { + thread_id_t cnt = spurious_thread_count + 1; + thread_id_t idx = this->rand(cnt, sched_type_user); + if (idx < spurious_thread_count) + { + thread_info_t* thr = this->spurious_threads_[idx]; + thread_index = thr->index_; + //??? suboptimal state space exploration + // if (1 != thr->block_count_) then we are making + // superfluous rand() + if (1 == thr->block_count_) + { + this->unpark_thread(thread_index); + RL_VERIFY(thr->state_ == thread_state_running); + reason = unpark_reason_spurious; + } + } + } + } + + if (unpark_reason_normal == reason) + { + if (1 != running_thread_count) + { + unsigned const index = this->rand(running_thread_count, sched_type_sched); + thread_index = this->running_threads[index]; + } + else + { + thread_index = this->running_threads[0]; + } + } + } + else + { + RL_VERIFY(t.state_ == thread_state_running); + thread_index = t.index_; + } + + if (t.index_ == thread_index) + return thread_index; + + //t.subsequent_timed_waits_ = 0; + self().on_switch(t); + + return thread_index; + } + + void thread_finished_impl() + { + } + + void purge_blocked_threads() + { + for (thread_id_t i = 0; i != thread_count; ++i) + { + on_thread_block(i, false); + } + } + + unsigned rand_impl(unsigned limit, sched_type t) + { + unsigned result = 0; + size_t const size = stree_.size(); + if (stree_depth_ == size) + { + stree_node n = {limit, 0, t}; + stree_.push_back(n); + } + else + { + RL_VERIFY(size); + stree_node& n = stree_[stree_depth_]; + + // If you hit assert here, then probably your test is non-deterministic + // Check whether you are using functions like ::rand() + // or static variables or values of object addresses (for hashing) in your test + // Replace ::rand() with rl::rand(), eliminate static variables in the test + RL_VERIFY(n.type_ == t); + + RL_VERIFY(n.count_ == limit); + RL_VERIFY(n.index_ < n.count_); + result = n.index_; + } + stree_depth_ += 1; + return result; + } + + iteration_t iteration_count_impl() + { + double current = self().iteration_count_approx(); + if (current <= this->iter_) + current = this->iter_ + 1.0; + + iteration_count_mean_ *= iteration_count_probe_count_; + iteration_count_probe_count_ += 1; + iteration_count_mean_ /= iteration_count_probe_count_; + iteration_count_mean_ += current / iteration_count_probe_count_; + + iteration_t result = (iteration_t)(iteration_count_mean_ + 0.5); + if (result <= this->iter_) + result = this->iter_ + 1; + return result; + } + + void get_state_impl(std::ostream& ss) + { + ss << (unsigned)stree_.size() << " "; + for (size_t i = 0; i != stree_.size(); ++i) + { + stree_node& n = stree_[i]; + ss << n.count_ << " "; + ss << n.index_ << " "; + ss << static_cast<unsigned>(n.type_) << " "; + } + } + + void 
set_state_impl(std::istream& ss) + { + size_t size = 0; + ss >> size; + for (size_t i = 0; i != size; ++i) + { + stree_node n = {}; + ss >> n.count_; + ss >> n.index_; + unsigned type = 0; + ss >> type; + n.type_ = static_cast<sched_type>(type); + stree_.push_back(n); + } + } + + void on_thread_block(thread_id_t th, bool yield) + { + //!!! doubled in schedule_impl() + thread_info_t& t = this->threads_[th]; + if (t.total_yield_priority_) + { + for (thread_id_t i = 0; i != thread_count; ++i) + { + if (t.yield_priority_[i]) + { + t.total_yield_priority_ -= t.yield_priority_[i]; + t.yield_priority_[i] = 0; + this->unblock_thread(i); + } + } + } + + (void)yield; + //if (yield) + // yield_priority(1); + } + +protected: + struct stree_node + { + unsigned count_; + unsigned index_; + sched_type type_; + unsigned pad_; + }; + + typedef typename vector<stree_node>::type stree_t; + stree_t stree_; + size_t stree_depth_; + +private: + double iteration_count_mean_; + unsigned iteration_count_probe_count_; + + derived_t& self() + { + return *static_cast<derived_t*>(this); + } + + RL_NOCOPY(tree_search_scheduler); +}; + + + + +template<thread_id_t thread_count> +class full_search_scheduler + : public tree_search_scheduler<full_search_scheduler<thread_count> + , tree_search_scheduler_thread_info<thread_count>, thread_count> +{ +public: + typedef tree_search_scheduler<full_search_scheduler<thread_count> + , tree_search_scheduler_thread_info<thread_count>, thread_count> base_t; + typedef typename base_t::thread_info_t thread_info_t; + typedef typename base_t::shared_context_t shared_context_t; + + full_search_scheduler(test_params& params, shared_context_t& ctx, thread_id_t dynamic_thread_count) + : base_t(params, ctx, dynamic_thread_count) + { + } + + bool can_switch(thread_info_t& /*t*/) + { + return true; + } + + void on_switch(thread_info_t& /*t*/) + { + } + + double iteration_count_approx() + { + double total = 1; + size_t const size = this->stree_.size(); + for (size_t i = 0; i != size; ++i) + { + total *= this->stree_[i].count_; + } + return total; + } + + RL_NOCOPY(full_search_scheduler); +}; + + +} + +#endif + diff --git a/libs/relacy/relacy/history.hpp b/libs/relacy/relacy/history.hpp @@ -0,0 +1,205 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. 
+ */ + +#ifndef RL_HISTORY_HPP +#define RL_HISTORY_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "base.hpp" + + +namespace rl +{ + + +typedef void (*event_output_f)(std::ostream& s, void const* ev); +typedef void (*event_dtor_f)(void* ev); + +struct history_entry +{ + thread_id_t thread_index_; + debug_info info_; + void* ev_; + event_output_f output_; + event_dtor_f dtor_; + + history_entry(thread_id_t thread_index, debug_info_param info, void* ev, event_output_f output, event_dtor_f dtor) + : thread_index_(thread_index) + , info_(info) + , ev_(ev) + , output_(output) + , dtor_(dtor) + { + } +}; + +template<typename T> +void event_output(std::ostream& s, void const* ev) +{ + static_cast<T const*>(ev)->output(s); +} + +template<typename T> +void event_dtor(void* ev) +{ + delete static_cast<T*>(ev); +} + + +struct user_event +{ + char const* desc_; + + void output(std::ostream& s) const + { + s << desc_; + } +}; + +inline string strip_path(char const* filename) +{ + char const* slash = strrchr(filename, '\\'); + if (slash) + return slash + 1; + else + return filename; +} + +inline std::ostream& operator << (std::ostream& ss, debug_info_param info) +{ + /* + char const* func = info; + char const* file = info + strlen(info) + 1; + char const* line = file + strlen(file) + 1; + */ + +#ifdef RL_MSVC_OUTPUT + ss << info.file_ << "(" << info.line_ << ") : "; +#else + ss << info.func_ << ", " << strip_path(info.file_) << "(" << info.line_ << ")"; +#endif + return ss; +} + + + +class history_mgr : nocopy<> +{ +public: + history_mgr(std::ostream& stream, thread_id_t thread_count) + : thread_count_(thread_count) + , out_stream_(stream) + { + } + + ~history_mgr() + { + clear(); + } + + template<typename event_t> + void exec_log(thread_id_t th, debug_info_param info, event_t const& ev, bool output_history) + { + exec_history_.push_back(history_entry(th, info, new event_t(ev), &event_output<event_t>, &event_dtor<event_t>)); + if (output_history) + { + output(exec_history_.size() - 1); + } + } + + void print_exec_history(bool output_history) + { + size_t const buf_size = 4096; + char buf [buf_size + 1]; + + size_t const count = exec_history_.size(); + if (false == output_history) + { + sprintf(buf, "execution history (%u):\n", (unsigned)count); + out_stream_ << buf; +#if defined(_MSC_VER) && defined(RL_MSVC_OUTPUT) + OutputDebugStringA(buf); +#endif + + for (size_t i = 0; i != count; ++i) + { + output(i); + } + } + out_stream_ << "\n"; +#if defined(_MSC_VER) && defined(RL_MSVC_OUTPUT) + OutputDebugStringA("\n"); +#endif + + for (thread_id_t th = 0; th != thread_count_; ++th) + { + sprintf(buf, "thread %u:\n", th); + out_stream_ << buf; +#if defined(_MSC_VER) && defined(RL_MSVC_OUTPUT) + OutputDebugStringA(buf); +#endif + for (size_t i = 0; i != count; ++i) + { + if (exec_history_[i].thread_index_ == th) + { + output(i); + } + } + out_stream_ << "\n"; +#if defined(_MSC_VER) && defined(RL_MSVC_OUTPUT) + OutputDebugStringA("\n"); +#endif + } + } + + void clear() + { + for (size_t i = 0; i != exec_history_.size(); ++i) + { + history_entry const& ent = exec_history_[i]; + ent.dtor_(ent.ev_); + } + exec_history_.clear(); + } + +private: + vector<history_entry>::type exec_history_; + thread_id_t thread_count_; + std::ostream& out_stream_; + + void output(size_t i) + { + std::ostringstream stream; + + history_entry const& ent = exec_history_[i]; +#ifdef RL_MSVC_OUTPUT + { + stream << ent.info_ << "[" << i << "] " << ent.thread_index_ << ": "; + ent.output_(stream, ent.ev_); + stream << std::endl; + } 
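+    // the "file(line) : " form printed above matches MSVC's diagnostic
+    // format, so under RL_MSVC_OUTPUT each history entry is presumably
+    // double-click navigable in the Visual Studio output window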
+#else + stream << "[" << (unsigned)i << "] " << ent.thread_index_ << ": "; + ent.output_(stream, ent.ev_); + stream << ", in " << ent.info_ << std::endl; +#endif + + out_stream_ << stream.str(); +#if defined(_MSC_VER) && defined(RL_MSVC_OUTPUT) + OutputDebugStringA(stream.str().c_str()); +#endif + } +}; + + +} + +#endif diff --git a/libs/relacy/relacy/java.hpp b/libs/relacy/relacy/java.hpp @@ -0,0 +1,301 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. + */ + +#ifndef RL_JAVA_HPP +#define RL_JAVA_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "base.hpp" + + +namespace rl +{ + +/* + +Hierarchy For Package java.util.concurrent.locks + +Class Hierarchy + + * java.lang.Object + o java.util.concurrent.locks.AbstractQueuedSynchronizer (implements java.io.Serializable) + o java.util.concurrent.locks.AbstractQueuedSynchronizer.ConditionObject (implements java.util.concurrent.locks.Condition, java.io.Serializable) + o java.util.concurrent.locks.LockSupport + o java.util.concurrent.locks.ReentrantLock (implements java.util.concurrent.locks.Lock, java.io.Serializable) + o java.util.concurrent.locks.ReentrantReadWriteLock (implements java.util.concurrent.locks.ReadWriteLock, java.io.Serializable) + o java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock (implements java.util.concurrent.locks.Lock, java.io.Serializable) + o java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock (implements java.util.concurrent.locks.Lock, java.io.Serializable) + +Interface Hierarchy + + * java.util.concurrent.locks.Condition + * java.util.concurrent.locks.Lock + * java.util.concurrent.locks.ReadWriteLock +*/ + + + + + +/* + +java.util.concurrent.Semaphore + + + +Public Constructors +public Semaphore(int permits) +Creates a Semaphore with the given number of permits and nonfair fairness setting. +Parameters +permits the initial number of permits available. This value may be negative, in which case releases must occur before any acquires will be granted. +public Semaphore(int permits, boolean fair) +Creates a Semaphore with the given number of permits and the given fairness setting. +Parameters +permits the initial number of permits available. This value may be negative, in which case releases must occur before any acquires will be granted. +fair true if this semaphore will guarantee first-in first-out granting of permits under contention, else false. +Public Methods +public void acquire() +Acquires a permit from this semaphore, blocking until one is available, or the thread is interrupted. + +Acquires a permit, if one is available and returns immediately, reducing the number of available permits by one. + +If no permit is available then the current thread becomes disabled for thread scheduling purposes and lies dormant until one of two things happens: + + * Some other thread invokes the release() method for this semaphore and the current thread is next to be assigned a permit; or + * Some other thread interrupts the current thread. 
+ +If the current thread: + + * has its interrupted status set on entry to this method; or + * is interrupted while waiting for a permit, + +then InterruptedException is thrown and the current thread's interrupted status is cleared. +Throws +InterruptedException if the current thread is interrupted +See Also + + * interrupt() + +public void acquire(int permits) +Acquires the given number of permits from this semaphore, blocking until all are available, or the thread is interrupted. + +Acquires the given number of permits, if they are available, and returns immediately, reducing the number of available permits by the given amount. + +If insufficient permits are available then the current thread becomes disabled for thread scheduling purposes and lies dormant until one of two things happens: + + * Some other thread invokes one of the release methods for this semaphore, the current thread is next to be assigned permits and the number of available permits satisfies this request; or + * Some other thread interrupts the current thread. + +If the current thread: + + * has its interrupted status set on entry to this method; or + * is interrupted while waiting for a permit, + +then InterruptedException is thrown and the current thread's interrupted status is cleared. Any permits that were to be assigned to this thread are instead assigned to the next waiting thread(s), as if they had been made available by a call to release(). +Parameters +permits the number of permits to acquire +Throws +InterruptedException if the current thread is interrupted +IllegalArgumentException if permits less than zero. +See Also + + * interrupt() + +public void acquireUninterruptibly(int permits) +Acquires the given number of permits from this semaphore, blocking until all are available. + +Acquires the given number of permits, if they are available, and returns immediately, reducing the number of available permits by the given amount. + +If insufficient permits are available then the current thread becomes disabled for thread scheduling purposes and lies dormant until some other thread invokes one of the release methods for this semaphore, the current thread is next to be assigned permits and the number of available permits satisfies this request. + +If the current thread is interrupted while waiting for permits then it will continue to wait and its position in the queue is not affected. When the thread does return from this method its interrupt status will be set. +Parameters +permits the number of permits to acquire +Throws +IllegalArgumentException if permits less than zero. +public void acquireUninterruptibly() +Acquires a permit from this semaphore, blocking until one is available. + +Acquires a permit, if one is available and returns immediately, reducing the number of available permits by one. + +If no permit is available then the current thread becomes disabled for thread scheduling purposes and lies dormant until some other thread invokes the release() method for this semaphore and the current thread is next to be assigned a permit. + +If the current thread is interrupted while waiting for a permit then it will continue to wait, but the time at which the thread is assigned a permit may change compared to the time it would have received the permit had no interruption occurred. When the thread does return from this method its interrupt status will be set. +public int availablePermits() +Returns the current number of permits available in this semaphore. 
+ +This method is typically used for debugging and testing purposes. +Returns + + * the number of permits available in this semaphore. + +public int drainPermits() +Acquire and return all permits that are immediately available. +Returns + + * the number of permits + +public final int getQueueLength() +Returns an estimate of the number of threads waiting to acquire. The value is only an estimate because the number of threads may change dynamically while this method traverses internal data structures. This method is designed for use in monitoring of the system state, not for synchronization control. +Returns + + * the estimated number of threads waiting for this lock + +public final boolean hasQueuedThreads() +Queries whether any threads are waiting to acquire. Note that because cancellations may occur at any time, a true return does not guarantee that any other thread will ever acquire. This method is designed primarily for use in monitoring of the system state. +Returns + + * true if there may be other threads waiting to acquire the lock. + +public boolean isFair() +Returns true if this semaphore has fairness set true. +Returns + + * true if this semaphore has fairness set true. + +public void release(int permits) +Releases the given number of permits, returning them to the semaphore. + +Releases the given number of permits, increasing the number of available permits by that amount. If any threads are blocking trying to acquire permits, then the one that has been waiting the longest is selected and given the permits that were just released. If the number of available permits satisfies that thread's request then that thread is re-enabled for thread scheduling purposes; otherwise the thread continues to wait. If there are still permits available after the first thread's request has been satisfied, then those permits are assigned to the next waiting thread. If it is satisfied then it is re-enabled for thread scheduling purposes. This continues until there are insufficient permits to satisfy the next waiting thread, or there are no more waiting threads. + +There is no requirement that a thread that releases a permit must have acquired that permit by calling acquire. Correct usage of a semaphore is established by programming convention in the application. +Parameters +permits the number of permits to release +Throws +IllegalArgumentException if permits less than zero. +public void release() +Releases a permit, returning it to the semaphore. + +Releases a permit, increasing the number of available permits by one. If any threads are blocking trying to acquire a permit, then one is selected and given the permit that was just released. That thread is re-enabled for thread scheduling purposes. + +There is no requirement that a thread that releases a permit must have acquired that permit by calling acquire(). Correct usage of a semaphore is established by programming convention in the application. +public String toString() +Returns a string identifying this semaphore, as well as its state. The state, in brackets, includes the String "Permits =" followed by the number of permits. +Returns + + * a string identifying this semaphore, as well as its state + +public boolean tryAcquire(long timeout, TimeUnit unit) +Acquires a permit from this semaphore, if one becomes available within the given waiting time and the current thread has not been interrupted. + +Acquires a permit, if one is available and returns immediately, with the value true, reducing the number of available permits by one. 
+ +If no permit is available then the current thread becomes disabled for thread scheduling purposes and lies dormant until one of three things happens: + + * Some other thread invokes the release() method for this semaphore and the current thread is next to be assigned a permit; or + * Some other thread interrupts the current thread; or + * The specified waiting time elapses. + +If a permit is acquired then the value true is returned. + +If the current thread: + + * has its interrupted status set on entry to this method; or + * is interrupted while waiting to acquire a permit, + +then InterruptedException is thrown and the current thread's interrupted status is cleared. + +If the specified waiting time elapses then the value false is returned. If the time is less than or equal to zero, the method will not wait at all. +Parameters +timeout the maximum time to wait for a permit +unit the time unit of the timeout argument. +Returns + + * true if a permit was acquired and false if the waiting time elapsed before a permit was acquired. + +Throws +InterruptedException if the current thread is interrupted +See Also + + * interrupt() + +public boolean tryAcquire(int permits, long timeout, TimeUnit unit) +Acquires the given number of permits from this semaphore, if all become available within the given waiting time and the current thread has not been interrupted. + +Acquires the given number of permits, if they are available and returns immediately, with the value true, reducing the number of available permits by the given amount. + +If insufficient permits are available then the current thread becomes disabled for thread scheduling purposes and lies dormant until one of three things happens: + + * Some other thread invokes one of the release methods for this semaphore, the current thread is next to be assigned permits and the number of available permits satisfies this request; or + * Some other thread interrupts the current thread; or + * The specified waiting time elapses. + +If the permits are acquired then the value true is returned. + +If the current thread: + + * has its interrupted status set on entry to this method; or + * is interrupted while waiting to acquire the permits, + +then InterruptedException is thrown and the current thread's interrupted status is cleared. Any permits that were to be assigned to this thread, are instead assigned to the next waiting thread(s), as if they had been made available by a call to release(). + +If the specified waiting time elapses then the value false is returned. If the time is less than or equal to zero, the method will not wait at all. Any permits that were to be assigned to this thread, are instead assigned to the next waiting thread(s), as if they had been made available by a call to release(). +Parameters +permits the number of permits to acquire +timeout the maximum time to wait for the permits +unit the time unit of the timeout argument. +Returns + + * true if all permits were acquired and false if the waiting time elapsed before all permits were acquired. + +Throws +InterruptedException if the current thread is interrupted +IllegalArgumentException if permits less than zero. +See Also + + * interrupt() + +public boolean tryAcquire(int permits) +Acquires the given number of permits from this semaphore, only if all are available at the time of invocation. + +Acquires the given number of permits, if they are available, and returns immediately, with the value true, reducing the number of available permits by the given amount. 
+ +If insufficient permits are available then this method will return immediately with the value false and the number of available permits is unchanged. + +Even when this semaphore has been set to use a fair ordering policy, a call to tryAcquire will immediately acquire a permit if one is available, whether or not other threads are currently waiting. This "barging" behavior can be useful in certain circumstances, even though it breaks fairness. If you want to honor the fairness setting, then use tryAcquire(permits, 0, TimeUnit.SECONDS) which is almost equivalent (it also detects interruption). +Parameters +permits the number of permits to acquire +Returns + + * true if the permits were acquired and false otherwise. + +Throws +IllegalArgumentException if permits less than zero. +public boolean tryAcquire() +Acquires a permit from this semaphore, only if one is available at the time of invocation. + +Acquires a permit, if one is available and returns immediately, with the value true, reducing the number of available permits by one. + +If no permit is available then this method will return immediately with the value false. + +Even when this semaphore has been set to use a fair ordering policy, a call to tryAcquire() will immediately acquire a permit if one is available, whether or not other threads are currently waiting. This "barging" behavior can be useful in certain circumstances, even though it breaks fairness. If you want to honor the fairness setting, then use tryAcquire(0, TimeUnit.SECONDS) which is almost equivalent (it also detects interruption). +Returns + + * true if a permit was acquired and false otherwise. + +Protected Methods +protected Collection<Thread> getQueuedThreads() +Returns a collection containing threads that may be waiting to acquire. Because the actual set of threads may change dynamically while constructing this result, the returned collection is only a best-effort estimate. The elements of the returned collection are in no particular order. This method is designed to facilitate construction of subclasses that provide more extensive monitoring facilities. +Returns + + * the collection of threads + +protected void reducePermits(int reduction) +Shrinks the number of available permits by the indicated reduction. This method can be useful in subclasses that use semaphores to track resources that become unavailable. This method differs from acquire in that it does not block waiting for permits to become available. +Parameters +reduction the number of permits to remove +Throws +IllegalArgumentException if reduction is negative +*/ + +} + +#endif diff --git a/libs/relacy/relacy/java_atomic.hpp b/libs/relacy/relacy/java_atomic.hpp @@ -0,0 +1,155 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. 
+ */ + +#ifndef RL_JAVA_ATOMIC_HPP +#define RL_JAVA_ATOMIC_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "base.hpp" +#include "atomic.hpp" + + +namespace rl +{ + + +template<typename T> class jatomic; + + +template<typename T> +class jatomic_proxy +{ +public: + T get() const + { + return var_.load(mo_seq_cst, info_); + } + + void set(T value) + { + var_.store(value, mo_seq_cst, info_); + } + + T addAndGet(T delta) + { + return getAndAdd(delta) + delta; + } + + bool compareAndSet(T expect, T update) + { + bool result = var_.compare_exchange(bool_t<false>(), expect, update, mo_seq_cst, info_); + return result; + } + + bool weakCompareAndSet(T expect, T update) + { + bool result = var_.compare_exchange(bool_t<true>(), expect, update, mo_seq_cst, info_); + return result; + } + + T decrementAndGet() + { + return getAndAdd(-1) - 1; + } + + T getAndAdd(T delta) + { + T result = var_.rmw(rmw_type_t<rmw_type_add>(), delta, mo_seq_cst, info_); + return result; + } + + T getAndDecrement() + { + return getAndAdd(-1); + } + + T getAndIncrement() + { + return getAndAdd(+1); + } + + T getAndSet(T newValue) + { + T result = var_.rmw(rmw_type_t<rmw_type_swap>(), newValue, mo_seq_cst, info_); + return result; + } + + T incrementAndGet() + { + return getAndAdd(1) + 1; + } + +private: + jatomic<T>& var_; + debug_info info_; + + //typedef typename atomic_add_type<T>::type add_type; + template<typename Y> friend class jatomic; + + jatomic_proxy(jatomic<T>& var, debug_info_param info) + : var_(var) + , info_(info) + { + } + + jatomic_proxy& operator = (jatomic_proxy const&); +}; + + +template<typename T> +class jatomic : generic_atomic<T, true> +{ +public: + typedef jatomic_proxy<T> proxy_t; + friend class jatomic_proxy<T>; + + jatomic() + { + } + + jatomic(T value) + { + //??? whether here must be mo_relaxed or mo_release? + this->store(value, mo_seq_cst, $); + } + + jatomic(jatomic const& r) + { + T const value = r.load(mo_seq_cst, $); + //??? whether here must be mo_relaxed or mo_release? + this->store(value, mo_seq_cst, $); + } + + jatomic(proxy_t const& r) + { + T const value = r.var_.load(mo_seq_cst, r.info_); + //??? whether here must be mo_relaxed or mo_release? + this->store(value, mo_seq_cst, r.info_); + } + + proxy_t operator () (debug_info_param info) + { + return proxy_t(*this, info); + } +}; + + +typedef jatomic<int> AtomicInteger; +typedef jatomic<long> AtomicLong; + + + + + + +} + +#endif diff --git a/libs/relacy/relacy/java_var.hpp b/libs/relacy/relacy/java_var.hpp @@ -0,0 +1,157 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. 
+ */ + +#ifndef RL_JAVA_VAR_HPP +#define RL_JAVA_VAR_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "base.hpp" +#include "atomic.hpp" + + +namespace rl +{ + +template<typename T> class jvar; + + +template<typename T> +class jvar_proxy +{ +public: + typedef typename atomic_add_type<T>::type add_type; + template<typename Y> friend class jvar; + + operator T () const + { + return load(); + } + + T operator = (T value) + { + store(value); + return value; + } + + T operator = (jvar_proxy const& r) + { + T const value = r.load(); + store(value); + return *this; + } + + T operator ++ (int) + { + T tmp = load(); + store(tmp + 1); + return tmp; + } + + T operator -- (int) + { + T tmp = load(); + store(tmp - 1); + return tmp; + } + + T operator ++ () + { + T tmp = load(); + store(tmp + 1); + return tmp + 1; + } + + T operator -- () + { + T tmp = load(); + store(tmp - 1); + return tmp - 1; + } + + T operator += (add_type value) + { + T tmp = load(); + store(tmp + value); + return tmp + value; + } + + T operator -= (add_type value) + { + T tmp = load(); + store(tmp - value); + return tmp - value; + } + +private: + jvar<T>& var_; + debug_info info_; + + jvar_proxy(jvar<T>& var, debug_info_param info) + : var_(var) + , info_(info) + { + } + + T load() const + { + return var_.load(mo_relaxed, info_); + } + + void store(T value) + { + var_.store(value, mo_relaxed, info_); + } +}; + + + + +template<typename T> +class jvar : generic_atomic<T, true> +{ +public: + typedef jvar_proxy<T> proxy_t; + friend class jvar_proxy<T>; + + jvar() + { + } + + jvar(T value) + { + this->store(value, mo_relaxed, $); + } + + jvar(jvar const& r) + { + T const value = r.load(mo_relaxed, $); + this->store(value, mo_relaxed, $); + } + + jvar(proxy_t const& r) + { + T const value = r.load(); + this->store(value, mo_relaxed, r.info_); + } + + proxy_t operator () (debug_info_param info) + { + return proxy_t(*this, info); + } + +private: + jvar& operator = (jvar const&); +}; + + +} + +#endif diff --git a/libs/relacy/relacy/java_volatile.hpp b/libs/relacy/relacy/java_volatile.hpp @@ -0,0 +1,158 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. 
+ */ + +#ifndef RL_JAVA_VOLATILE_HPP +#define RL_JAVA_VOLATILE_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "base.hpp" +#include "atomic.hpp" + + +namespace rl +{ + +template<typename T> class jvolatile; + + +template<typename T> +class jvolatile_proxy +{ +public: + typedef typename atomic_add_type<T>::type add_type; + template<typename Y> friend class jvolatile; + + operator T () const + { + return load(); + } + + T operator = (T value) + { + store(value); + return value; + } + + T operator = (jvolatile_proxy const& r) + { + T const value = r.load(); + store(value); + return *this; + } + + T operator ++ (int) + { + T tmp = load(); + store(tmp + 1); + return tmp; + } + + T operator -- (int) + { + T tmp = load(); + store(tmp - 1); + return tmp; + } + + T operator ++ () + { + T tmp = load(); + store(tmp + 1); + return tmp + 1; + } + + T operator -- () + { + T tmp = load(); + store(tmp - 1); + return tmp - 1; + } + + T operator += (add_type value) + { + T tmp = load(); + store(tmp + value); + return tmp + value; + } + + T operator -= (add_type value) + { + T tmp = load(); + store(tmp - value); + return tmp - value; + } + +private: + jvolatile<T>& var_; + debug_info info_; + + jvolatile_proxy(jvolatile<T>& var, debug_info_param info) + : var_(var) + , info_(info) + { + } + + T load() const + { + return var_.load(mo_seq_cst, info_); + } + + void store(T value) + { + var_.store(value, mo_seq_cst, info_); + } +}; + + + + +template<typename T> +class jvolatile : generic_atomic<T, true> +{ +public: + typedef jvolatile_proxy<T> proxy_t; + friend class jvolatile_proxy<T>; + + jvolatile() + { + } + + explicit jvolatile(T value) + { + //??? whether here must be mo_relaxed or mo_release? + this->store(value, mo_seq_cst, $); + } + + jvolatile(jvolatile const& r) + { + T const value = r.load(mo_seq_cst, $); + //??? whether here must be mo_relaxed or mo_release? + this->store(value, mo_seq_cst, $); + } + + jvolatile(proxy_t const& r) + { + T const value = r.var_.load(mo_seq_cst, r.info_); + //??? whether here must be mo_relaxed or mo_release? + this->store(value, mo_seq_cst, r.info_); + } + + proxy_t operator () (debug_info_param info) + { + return proxy_t(*this, info); + } +}; + + + +} + +#endif diff --git a/libs/relacy/relacy/memory.hpp b/libs/relacy/relacy/memory.hpp @@ -0,0 +1,241 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. 
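+ */
+
+/* Note (summary, not part of the original source): memory_mgr below keeps a
+ * size-segregated free-list cache plus a 64-slot deferred-free ring. With
+ * defer=true a freed block is recycled only after roughly 64 subsequent
+ * deferred frees have pushed it out of the ring, so freed memory stays
+ * quarantined long enough for the access-to-freed-memory checks (see
+ * signature.hpp) to catch stragglers. Sketch, with an illustrative mgr:
+ *
+ *   void* p = mgr.alloc(24);  // malloc'd block with a hidden size header
+ *   mgr.free(p, true);        // parked in deferred_free_[idx % 64],
+ *                             // actually recycled ~64 frees later
+ *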
+ */ + +#ifndef RL_MEMORY_HPP +#define RL_MEMORY_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "base.hpp" + + +namespace rl +{ + + +class memory_mgr : nocopy<> +{ +public: + memory_mgr() + { + memset(deferred_free_, 0, sizeof(deferred_free_)); + memset(deferred_free_size_, 0, sizeof(deferred_free_size_)); + deferred_index_ = 0; + } + + ~memory_mgr() + { + /* + while (allocs_.size()) + { + size_t* p = (size_t*)(allocs_.begin()->first); + free(p - 1, false); + allocs_.erase(allocs_.begin()); + } + */ + } + +#ifndef RL_GC + void* alloc(size_t size) +#else + void* alloc(size_t size, void (*dtor)(void*)) +#endif + { + void* pp = 0; + for (size_t i = 0; i != alloc_cache_.size(); ++i) + { + if (alloc_cache_[i].first == size) + { + if (alloc_cache_[i].second.size()) + { + pp = alloc_cache_[i].second.top(); + alloc_cache_[i].second.pop(); + } + break; + } + } + if (0 == pp) + pp = (::malloc)(size + alignment); + + if (pp) + { + RL_VERIFY(alignment >= sizeof(void*)); + *(size_t*)pp = size; + void* p = (char*)pp + alignment; +#ifndef RL_GC + allocs_.insert(std::make_pair(p, size)); +#else + alloc_desc_t desc = {p, size, dtor}; + gc_allocs_.push_back(desc); +#endif + return p; + } + else + { + throw std::bad_alloc(); + } + } + + bool free(void* pp, bool defer) + { + if (0 == pp) + return true; + +#ifndef RL_GC + map<void*, size_t>::type::iterator iter = allocs_.find(pp); + if (allocs_.end() == iter) + return false; + + allocs_.erase(iter); + + void* p = (char*)pp - alignment; + size_t size = *(size_t*)p; + + if (defer) + { + deferred_free_[deferred_index_ % deferred_count] = p; + deferred_free_size_[deferred_index_ % deferred_count] = size; + deferred_index_ += 1; + p = deferred_free_[deferred_index_ % deferred_count]; + size = deferred_free_size_[deferred_index_ % deferred_count]; + if (p) + rl_free_impl(p, size); + } + else + { + rl_free_impl(p, size); + } + return true; +#else + (void)defer; + for (size_t i = 0; i != gc_allocs_.size(); ++i) + { + alloc_desc_t const& desc = gc_allocs_[i]; + if (desc.addr == pp) + { + void* p = (char*)desc.addr - alignment; + rl_free_impl(p, desc.size); + gc_allocs_.erase(gc_allocs_.begin() + i); + return true; + } + } + return false; +#endif + } + + bool iteration_end() + { +#ifndef RL_GC + return allocs_.empty(); +#else + for (size_t i = 0; i != gc_allocs_.size(); ++i) + { + alloc_desc_t const& desc = gc_allocs_[i]; + if (desc.dtor) + desc.dtor(desc.addr); + void* p = (char*)desc.addr - alignment; + rl_free_impl(p, desc.size); + } + gc_allocs_.clear(); + return true; +#endif + } + +#ifndef RL_GC + void output_allocs(std::ostream& stream) + { + stream << "memory allocations:" << std::endl; + map<void*, size_t>::type::iterator iter = allocs_.begin(); + map<void*, size_t>::type::iterator end = allocs_.end(); + for (; iter != end; ++iter) + { + stream << iter->first << " [" << (unsigned)iter->second << "]" << std::endl; + } + stream << std::endl; + } +#endif + +private: + typedef stack<void*>::type freelist_t; + typedef std::pair<size_t, freelist_t> alloc_entry_t; + typedef vector<alloc_entry_t>::type alloc_t; + + static size_t const deferred_count = 64; + + alloc_t alloc_cache_; + size_t deferred_index_; + void* deferred_free_ [deferred_count]; + size_t deferred_free_size_ [deferred_count]; + +#ifndef RL_GC + map<void*, size_t>::type allocs_; +#else + struct alloc_desc_t + { + void* addr; + size_t size; + void (*dtor)(void*); + }; + vector<alloc_desc_t>::type gc_allocs_; +#endif + + void rl_free_impl(void* p, size_t size) + { + bool found = false; + for 
(size_t i = 0; i != alloc_cache_.size(); ++i) + { + if (alloc_cache_[i].first == size) + { + found = true; + alloc_cache_[i].second.push(p); + break; + } + } + if (!found) + { + alloc_cache_.push_back(std::make_pair(size, freelist_t())); + alloc_cache_.back().second.push(p); + } + } +}; + + + + +struct memory_alloc_event +{ + void* addr_; + size_t size_; + bool is_array_; + + void output(std::ostream& s) const + { + s << "memory allocation: addr=" << std::hex << (void*)((char*)addr_ + (is_array_ ? alignment : 0)) << std::dec + << ", size=" << (unsigned)size_; + } +}; + + +struct memory_free_event +{ + void* addr_; + bool is_array_; + + void output(std::ostream& s) const + { + s << "memory deallocation: addr=" << std::hex << (void*)((char*)addr_ + (is_array_ ? alignment : 0)) << std::dec; + } +}; + + + +} + +#endif diff --git a/libs/relacy/relacy/memory_order.hpp b/libs/relacy/relacy/memory_order.hpp @@ -0,0 +1,54 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. + */ + +#ifndef RL_MEMORY_ORDER_HPP +#define RL_MEMORY_ORDER_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "base.hpp" + + +namespace rl +{ + + +enum memory_order +{ + mo_relaxed, + mo_consume, + mo_acquire, + mo_release, + mo_acq_rel, + mo_seq_cst, +}; + + + + +inline char const* format(memory_order mo) +{ + switch (mo) + { + case mo_relaxed: return "relaxed"; + case mo_consume: return "consume"; + case mo_acquire: return "acquire"; + case mo_release: return "release"; + case mo_acq_rel: return "acq_rel"; + case mo_seq_cst: return "seq_cst"; + } + RL_VERIFY(!"invalid value of memory order"); + throw std::logic_error("invalid value of memory order"); +} + + +} + +#endif diff --git a/libs/relacy/relacy/pch.hpp b/libs/relacy/relacy/pch.hpp @@ -0,0 +1,76 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. 
+ */ + +#ifndef RL_PCH_HPP +#define RL_PCH_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#ifndef _CRT_SECURE_NO_WARNINGS +# define _CRT_SECURE_NO_WARNINGS 1 +#endif + +#ifdef _FORTIFY_SOURCE +# undef _FORTIFY_SOURCE +#endif + +#ifndef _XOPEN_SOURCE +# define _XOPEN_SOURCE +#endif + +#include <stdlib.h> +#include <stdio.h> +#include <stddef.h> +#include <limits.h> +#include <memory.h> +#include <string.h> + +#include <typeinfo> +#include <iostream> +#include <sstream> +#include <algorithm> +#include <stdexcept> +#include <utility> +#include <iterator> +#include <memory> +#include <vector> +#include <queue> +#include <string> +#include <stack> +#include <set> +#include <map> +#include <new> + +#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64) +# define RL_WIN +#endif + +#if defined(RL_WIN) || defined(_CYGWIN) +# ifndef _WIN32_WINNT +# define _WIN32_WINNT 0x0500 +# endif +# define WIN32_LEAN_AND_MEAN +# include <windows.h> +# include <process.h> +# ifdef RL_WIN +# include <intrin.h> +# else +# include <stdint.h> +# include <sys/times.h> +# endif +#else +# include <stdint.h> +# include <sys/times.h> +# include <unistd.h> +# include <ucontext.h> +# include <setjmp.h> +#endif + +#endif diff --git a/libs/relacy/relacy/platform.hpp b/libs/relacy/relacy/platform.hpp @@ -0,0 +1,257 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. + */ + +#ifndef RL_PLATFORM_HPP +#define RL_PLATFORM_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "pch.hpp" + + +#if defined(RL_WIN) || defined(_CYGWIN) + +typedef void* fiber_t; + +inline unsigned get_tick_count() +{ + return GetTickCount(); +} + +inline void set_low_thread_prio() +{ + SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_BELOW_NORMAL); +} + +inline void create_main_fiber(fiber_t& fib) +{ + fib = ConvertThreadToFiber(0); + if (0 == fib) + { + unsigned long err = ::GetLastError(); (void)err; + throw std::logic_error("you must start simulation inside a thread (not a fiber)"); + } +} + +inline void delete_main_fiber(fiber_t& fib) +{ + (void)fib; + HMODULE lib = LoadLibraryW(L"kernel32.dll"); + if (lib) + { + void* proc = (void*)GetProcAddress(lib, "ConvertFiberToThread"); + if (proc) + { + typedef BOOL (WINAPI * ConvertFiberToThreadT)(); + ConvertFiberToThreadT ConvertFiberToThread = (ConvertFiberToThreadT)proc; + ConvertFiberToThread(); + } + FreeLibrary(lib); + } +} + +inline void create_fiber(fiber_t& fib, void(*fiber_proc)(void*), void* ctx) +{ + size_t const stack_size = 64*1024; + fib = CreateFiberEx(4*1024, stack_size, 0, (LPFIBER_START_ROUTINE)fiber_proc, ctx); + if (fib == 0) + throw std::runtime_error("error creating fiber"); +} + +inline void delete_fiber(fiber_t& fib) +{ + DeleteFiber(fib); +} + +inline void switch_to_fiber(fiber_t fib, fiber_t) +{ + SwitchToFiber(fib); +} + +// work-around for some versions of cygwin +extern "C" inline int __gxx_personality_v0() +{ + return 0; +} + +#ifdef RL_WIN +#else + +/* +inline unsigned get_tick_count() +{ + return GetTickCount(); +} + +typedef void* fiber_t; + +struct ucontext_t +{ + struct stack_t + { + void* ss_sp; + size_t ss_size; + }; + stack_t uc_stack; + void* uc_link; + +}; +void getcontext(void*) {} +void 
makecontext(void*, void(*)(), int, void*) {} +void swapcontext(void*, void*) {} + +*/ + +#endif + +#else + +inline unsigned get_tick_count() +{ + struct tms tms; + return ((unsigned)(times (&tms) * (1000 / sysconf(_SC_CLK_TCK)))); +} + +inline void set_low_thread_prio() +{ +} + +#if 0 + +typedef ucontext_t fiber_t; + +inline void create_main_fiber(fiber_t& fib) +{ + ucontext_t f = {}; + fib = f; +} + +inline void delete_main_fiber(fiber_t& fib) +{ + (void)fib; +} + +inline void create_fiber(fiber_t& fib, void(*fiber_proc)(void*), void* ctx) +{ + size_t const stack_size = 64*1024; + getcontext(&fib); + fib.uc_stack.ss_sp = (::malloc)(stack_size); + fib.uc_stack.ss_size = stack_size; + fib.uc_link = 0; + typedef void(*fn_t)(); + fn_t fn = (fn_t)fiber_proc; + makecontext(&fib, fn, 1, ctx); +} + +inline void delete_fiber(fiber_t& fib) +{ + //(::free)(fib.uc_stack.ss_sp); +} + +inline void switch_to_fiber(fiber_t& fib, fiber_t& prev) +{ + swapcontext(&prev, &fib); +} + +#else + +struct fiber_t +{ + ucontext_t fib; + jmp_buf jmp; +}; + +struct fiber_ctx_t +{ + void(* fnc)(void*); + void* ctx; + jmp_buf* cur; + ucontext_t* prv; +}; + +static void fiber_start_fnc(void* p) +{ + fiber_ctx_t* ctx = (fiber_ctx_t*)p; + void (*volatile ufnc)(void*) = ctx->fnc; + void* volatile uctx = ctx->ctx; + if (_setjmp(*ctx->cur) == 0) + { + ucontext_t tmp; + swapcontext(&tmp, ctx->prv); + } + ufnc(uctx); +} + +inline void create_main_fiber(fiber_t& fib) +{ + memset(&fib, 0, sizeof(fib)); +} + +inline void delete_main_fiber(fiber_t& fib) +{ + (void)fib; +} + +inline void create_fiber(fiber_t& fib, void(*ufnc)(void*), void* uctx) +{ + size_t const stack_size = 64*1024; + getcontext(&fib.fib); + fib.fib.uc_stack.ss_sp = (::malloc)(stack_size); + fib.fib.uc_stack.ss_size = stack_size; + fib.fib.uc_link = 0; + ucontext_t tmp; + fiber_ctx_t ctx = {ufnc, uctx, &fib.jmp, &tmp}; + makecontext(&fib.fib, (void(*)())fiber_start_fnc, 1, &ctx); + swapcontext(&tmp, &fib.fib); +} + +inline void delete_fiber(fiber_t& fib) +{ + //(::free)(fib.uc_stack.ss_sp); +} + +inline void switch_to_fiber(fiber_t& fib, fiber_t& prv) +{ + if (_setjmp(prv.jmp) == 0) + _longjmp(fib.jmp, 1); +} + +#endif + +#endif + + + +#ifdef _MSC_VER + typedef unsigned __int64 uint64_t; +# define RL_INLINE __forceinline +# define RL_NOINLINE __declspec(noinline) +# define RL_STRINGIZE(text) RL_STRINGIZE_A((text)) +# define RL_STRINGIZE_I(text) #text +# define RL_STRINGIZE_A(arg) RL_STRINGIZE_I arg +# define RL_STDCALL __stdcall +#else +# define RL_INLINE inline +# define RL_NOINLINE +# define RL_STRINGIZE_I(text) #text +# define RL_STRINGIZE(text) RL_STRINGIZE_I(text) +# define RL_STDCALL +#endif + + +#if defined (_MSC_VER) && (_MSC_VER >= 1400) +# define RL_RESTRICT __restrict +#else +# define RL_RESTRICT +#endif + + + +#endif diff --git a/libs/relacy/relacy/pthread.h b/libs/relacy/relacy/pthread.h @@ -0,0 +1,21 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. 
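+ */
+
+/* Note (assumption, not part of the original source): this header shares its
+ * name with the system <pthread.h> on purpose. Putting libs/relacy/relacy
+ * first on the include path lets unmodified pthread-based code pick up the
+ * instrumented primitives from stdlib/pthread.hpp instead of libpthread,
+ * e.g. (illustrative):
+ *
+ *   g++ -I libs/relacy/relacy test.cpp   // test.cpp: #include <pthread.h>
+ *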
+ */ + +#ifndef RL_PTHREAD_IFACE_HPP +#define RL_PTHREAD_IFACE_HPP +#ifdef _MSC_VER +# pragma once +#endif + + +#include "relacy.hpp" +#include "stdlib/pthread.hpp" + + +#endif diff --git a/libs/relacy/relacy/random.hpp b/libs/relacy/relacy/random.hpp @@ -0,0 +1,55 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. + */ + +#ifndef RL_RANDOM_HPP +#define RL_RANDOM_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "base.hpp" + + +namespace rl +{ + + +unsigned const primes[16] = {1, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53}; + +struct random_generator +{ + unsigned k; + unsigned c; + unsigned x; + + void seed(iteration_t s) + { + k = ((unsigned)(s >> 32) & 0xf) + 8; + c = primes[((unsigned)(s >> 36) & 0xf)]; + x = (unsigned)((s + 1) * 0x95949347 + c); + } + + unsigned rand() + { + return ((x = x + c + (x << k)) >> 16); + } + + template<typename T, T max> + RL_INLINE + T get() + { + return static_cast<T>(rand() % max); + } +}; + + + +} + +#endif diff --git a/libs/relacy/relacy/random_scheduler.hpp b/libs/relacy/relacy/random_scheduler.hpp @@ -0,0 +1,141 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. + */ + +#ifndef RL_RANDOM_SCHEDULER_HPP +#define RL_RANDOM_SCHEDULER_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "base.hpp" +#include "scheduler.hpp" +#include "random.hpp" + + +namespace rl +{ + + +template<thread_id_t thread_count> +class random_scheduler : public scheduler<random_scheduler<thread_count>, scheduler_thread_info, thread_count> +{ +public: + typedef scheduler<random_scheduler<thread_count>, scheduler_thread_info, thread_count> base_t; + typedef typename base_t::thread_info_t thread_info_t; + typedef typename base_t::shared_context_t shared_context_t; + + struct task_t + { + }; + + random_scheduler(test_params& params, shared_context_t& ctx, thread_id_t dynamic_thread_count) + : base_t(params, ctx, dynamic_thread_count) + { + } + + thread_id_t iteration_begin_impl() + { + rand_.seed(this->iter_); + unpark_reason reason; + return schedule_impl(reason, false); + } + + bool iteration_end_impl() + { + return this->iter_ == this->params_.iteration_count; + } + + thread_id_t schedule_impl(unpark_reason& reason, unsigned /*yield*/) + { + thread_id_t const running_thread_count = this->running_threads_count; + + thread_id_t timed_thread_count = this->timed_thread_count_; + if (timed_thread_count) + { + thread_id_t cnt = running_thread_count ? 
timed_thread_count * 4 : timed_thread_count; + thread_id_t idx = rand_.rand() % cnt; + if (idx < timed_thread_count) + { + thread_info_t* thr = this->timed_threads_[idx]; + thread_id_t th = thr->index_; + RL_VERIFY(1 == thr->block_count_); + this->unpark_thread(th); + RL_VERIFY(thr->state_ == thread_state_running); + reason = unpark_reason_timeout; + return th; + } + } + + thread_id_t spurious_thread_count = this->spurious_thread_count_; + if (spurious_thread_count && running_thread_count) + { + thread_id_t cnt = spurious_thread_count * 8; + thread_id_t idx = rand_.rand() % cnt; + if (idx < spurious_thread_count) + { + thread_info_t* thr = this->spurious_threads_[idx]; + thread_id_t th = thr->index_; + RL_VERIFY(1 == thr->block_count_); + this->unpark_thread(th); + RL_VERIFY(thr->state_ == thread_state_running); + reason = unpark_reason_spurious; + return th; + } + } + + RL_VERIFY(running_thread_count); + unsigned index = rand_.rand() % running_thread_count; + thread_id_t th = this->running_threads[index]; + reason = unpark_reason_normal; + return th; + } + + unsigned rand_impl(unsigned limit, sched_type t) + { + (void)t; + unsigned r = rand_.rand() % limit; + ///!!! +#ifdef RL_MY_TEST + if (this->iter_ == 8761115) + { + char buf [1024]; + sprintf(buf, "rand(%u, %u) = %u\n", t, limit, r); + OutputDebugStringA(buf); + } +#endif + return r; + } + + iteration_t iteration_count_impl() + { + return this->params_.iteration_count; + } + + void get_state_impl(std::ostream& /*ss*/) + { + } + + void set_state_impl(std::istream& /*ss*/) + { + } + + void on_thread_block(thread_id_t /*th*/, bool /*yield*/) + { + } + +private: + random_generator rand_; + + RL_NOCOPY(random_scheduler); +}; + + +} + +#endif diff --git a/libs/relacy/relacy/relacy.hpp b/libs/relacy/relacy/relacy.hpp @@ -0,0 +1,73 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. 
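+ */
+
+/* Usage sketch (illustrative, not part of the original source): this is the
+ * umbrella header. It pulls in the whole checker and installs the
+ * instrumentation macros defined below (VAR_T/VAR, the memory_order_*
+ * rewrites that append $ debug info to every atomic call, and the
+ * new/malloc interposition). A minimal test unit looks like:
+ *
+ *   struct race_test : rl::test_suite<race_test, 2>
+ *   {
+ *       rl::atomic<int> a;
+ *       rl::var<int> x;
+ *       void thread(unsigned index)
+ *       {
+ *           if (0 == index)
+ *           {
+ *               x($) = 1;
+ *               a($).store(1, rl::memory_order_release);
+ *           }
+ *           else if (1 == a($).load(rl::memory_order_acquire))
+ *               x($) = 2;
+ *       }
+ *   };
+ *   // somewhere in main: rl::simulate<race_test>();
+ *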
+ */ + +#ifndef RL_RELACY_HPP +#define RL_RELACY_HPP +#ifdef _MSC_VER +# pragma once +#endif + + +#include "base.hpp" +#include "context.hpp" +#include "context_base_impl.hpp" +#include "backoff.hpp" +#include "atomic_fence.hpp" +#include "atomic.hpp" +#include "var.hpp" +#include "thread_local.hpp" +#include "test_suite.hpp" +#include "dyn_thread.hpp" + +#include "stdlib/mutex.hpp" +#include "stdlib/condition_variable.hpp" +#include "stdlib/semaphore.hpp" +#include "stdlib/event.hpp" + +#include "stdlib/windows.hpp" +#include "stdlib/pthread.hpp" + +#define VAR_T(x) rl::var<x> +#define TLS_T(T) rl::thread_local_var<T> +#define VAR(x) x($) + +#ifndef RL_FORCE_SEQ_CST +#define memory_order_relaxed mo_relaxed, $ +#define memory_order_consume mo_consume, $ +#define memory_order_acquire mo_acquire, $ +#define memory_order_release mo_release, $ +#define memory_order_acq_rel mo_acq_rel, $ +#define memory_order_seq_cst mo_seq_cst, $ +#else +#define memory_order_relaxed mo_seq_cst, $ +#define memory_order_consume mo_seq_cst, $ +#define memory_order_acquire mo_seq_cst, $ +#define memory_order_release mo_seq_cst, $ +#define memory_order_acq_rel mo_seq_cst, $ +#define memory_order_seq_cst mo_seq_cst, $ +#endif + +#define new RL_NEW_PROXY +#define delete RL_DELETE_PROXY +#define malloc(sz) rl::rl_malloc((sz), $) +#define calloc(sz, cnt) rl::rl_calloc((sz), (cnt), $) +#define realloc(p, sz) rl::rl_realloc((p), (sz), $) +#define free(p) rl::rl_free((p), $) + +#ifdef assert +#undef assert +#endif +#define assert RL_ASSERT + +#ifdef errno +#undef errno +#endif +#define errno (rl::get_errno()) + +#endif diff --git a/libs/relacy/relacy/relacy_cli.hpp b/libs/relacy/relacy/relacy_cli.hpp @@ -0,0 +1,29 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. + */ + +#ifndef RL_RELACY_CLI_HPP +#define RL_RELACY_CLI_HPP +#ifdef _MSC_VER +# pragma once +#endif + + +#define RL_CLI_MODE + +#include "relacy.hpp" + +#include "cli.hpp" +#include "cli_interlocked.hpp" +#include "cli_volatile.hpp" +#include "cli_var.hpp" + + + + +#endif diff --git a/libs/relacy/relacy/relacy_java.hpp b/libs/relacy/relacy/relacy_java.hpp @@ -0,0 +1,29 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. + */ + +#ifndef RL_RELACY_JAVA_HPP +#define RL_RELACY_JAVA_HPP +#ifdef _MSC_VER +# pragma once +#endif + + +#define RL_JAVA_MODE + +#include "relacy.hpp" + +#include "java.hpp" +#include "java_atomic.hpp" +#include "java_volatile.hpp" +#include "java_var.hpp" + + + + +#endif diff --git a/libs/relacy/relacy/relacy_std.hpp b/libs/relacy/relacy/relacy_std.hpp @@ -0,0 +1,82 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. 
+ * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. + */ + +#ifndef RL_RELACY_STD_HPP +#define RL_RELACY_STD_HPP +#ifdef _MSC_VER +# pragma once +#endif + + +#include "relacy.hpp" + + +namespace std +{ + using rl::memory_order; + using rl::mo_relaxed; + using rl::mo_consume; + using rl::mo_acquire; + using rl::mo_release; + using rl::mo_acq_rel; + using rl::mo_seq_cst; + + using rl::atomic; + using rl::atomic_thread_fence; + using rl::atomic_signal_fence; + + using rl::atomic_bool; + using rl::atomic_address; + + using rl::atomic_char; + using rl::atomic_schar; + using rl::atomic_uchar; + using rl::atomic_short; + using rl::atomic_ushort; + using rl::atomic_int; + using rl::atomic_uint; + using rl::atomic_long; + using rl::atomic_ulong; + using rl::atomic_llong; + using rl::atomic_ullong; +// using rl::atomic_char16_t; +// using rl::atomic_char32_t; + using rl::atomic_wchar_t; + +// using rl::atomic_int_least8_t; +// using rl::atomic_uint_least8_t; +// using rl::atomic_int_least16_t; +// using rl::atomic_uint_least16_t; +// using rl::atomic_int_least32_t; +// using rl::atomic_uint_least32_t; +// using rl::atomic_int_least64_t; +// using rl::atomic_uint_least64_t; +// using rl::atomic_int_fast8_t; +// using rl::atomic_uint_fast8_t; +// using rl::atomic_int_fast16_t; +// using rl::atomic_uint_fast16_t; +// using rl::atomic_int_fast32_t; +// using rl::atomic_uint_fast32_t; +// using rl::atomic_int_fast64_t; +// using rl::atomic_uint_fast64_t; + using rl::atomic_intptr_t; + using rl::atomic_uintptr_t; + using rl::atomic_size_t; +// using rl::atomic_ssize_t; + using rl::atomic_ptrdiff_t; +// using rl::atomic_intmax_t; +// using rl::atomic_uintmax_t; + + using rl::mutex; + using rl::recursive_mutex; + using rl::condition_variable; + using rl::condition_variable_any; +} + +#endif diff --git a/libs/relacy/relacy/rmw.hpp b/libs/relacy/relacy/rmw.hpp @@ -0,0 +1,101 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. 
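+ */
+
+/* Note (not part of the original source): perform_rmw below computes the new
+ * value of an atomic read-modify-write purely from the old value and the
+ * operand, tag-dispatched on rmw_type_e. For example, modelling fetch_add:
+ *
+ *   int old_val = 5;
+ *   int new_val = rl::perform_rmw(rl::rmw_type_t<rl::rmw_type_add>(), old_val, 3);
+ *   // new_val == 8; the atomic stores new_val, the operation returns old_val
+ *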
+ */ + +#ifndef RL_RMW_HPP +#define RL_RMW_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "base.hpp" + + +namespace rl +{ + + +enum rmw_type_e +{ + rmw_type_swap, + rmw_type_add, + rmw_type_sub, + rmw_type_and, + rmw_type_or, + rmw_type_xor, +}; + + + + +inline char const* format(rmw_type_e t) +{ + switch (t) + { + case rmw_type_swap: return "exchange"; + case rmw_type_add: return "fetch_add"; + case rmw_type_sub: return "fetch_sub"; + case rmw_type_and: return "fetch_and"; + case rmw_type_or: return "fetch_or"; + case rmw_type_xor: return "fetch_xor"; + } + RL_VERIFY(!"invalid rmw type"); + throw std::logic_error("invalid rmw type"); +} + + + + +template<rmw_type_e type> struct rmw_type_t {}; + + + + +template<typename T, typename Y> +T perform_rmw(rmw_type_t<rmw_type_swap>, T v, Y op) +{ + (void)v; + return op; +} + +template<typename T, typename Y> +T perform_rmw(rmw_type_t<rmw_type_add>, T v, Y op) +{ + return v + op; +} + +template<typename T, typename Y> +T perform_rmw(rmw_type_t<rmw_type_sub>, T v, Y op) +{ + return v - op; +} + +template<typename T, typename Y> +T perform_rmw(rmw_type_t<rmw_type_and>, T v, Y op) +{ + return v & op; +} + +template<typename T, typename Y> +T perform_rmw(rmw_type_t<rmw_type_or>, T v, Y op) +{ + return v | op; +} + +template<typename T, typename Y> +T perform_rmw(rmw_type_t<rmw_type_xor>, T v, Y op) +{ + return v ^ op; +} + + + +} + + +#endif diff --git a/libs/relacy/relacy/scheduler.hpp b/libs/relacy/relacy/scheduler.hpp @@ -0,0 +1,332 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. 
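+ */
+
+/* Note (not part of the original source): scheduler<> below is a CRTP base.
+ * A concrete scheduler, e.g. random_scheduler above or full_search_scheduler,
+ * supplies policy through the *_impl hooks invoked via self():
+ *
+ *   thread_id_t iteration_begin_impl();
+ *   bool        iteration_end_impl();
+ *   thread_id_t schedule_impl(unpark_reason& reason, unsigned yield);
+ *   unsigned    rand_impl(unsigned limit, sched_type t);
+ *   iteration_t iteration_count_impl();
+ *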
+ */ + +#ifndef RL_SCHEDULER_HPP +#define RL_SCHEDULER_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "base.hpp" +#include "context_base.hpp" + + +namespace rl +{ + + +enum thread_state_e +{ + thread_state_running, + thread_state_blocked, + thread_state_finished, +}; + +enum thread_finish_result +{ + thread_finish_result_normal, + thread_finish_result_last, + thread_finish_result_deadlock, +}; + + + +struct scheduler_thread_info +{ + thread_id_t index_; + unsigned block_count_; + thread_state_e state_; + + void reset(test_params& /*params*/) + { + block_count_ = 0; + state_ = thread_state_running; + } +}; + + + + +template<typename derived_t, typename thread_info_type, thread_id_t thread_count> +class scheduler : nocopy<> +{ +public: + typedef thread_info_type thread_info_t; + + struct shared_context_t + { + typedef typename derived_t::task_t task_t; + //CRITICAL_SECTION guard_; + queue<task_t> queue_; + }; + + scheduler(test_params& params, shared_context_t& ctx, thread_id_t dynamic_thread_count) + : params_(params) + , ctx_(ctx) + , total_dynamic_threads_(dynamic_thread_count) + , iter_() + , thread_() + { + for (thread_id_t i = 0; i != thread_count; ++i) + { + threads_[i].index_ = i; + } + } + + thread_id_t iteration_begin(iteration_t iter) + { + iter_ = iter; + running_threads_count = thread_count; + finished_thread_count_ = 0; + timed_thread_count_ = 0; + spurious_thread_count_ = 0; + dynamic_thread_count_ = 0; + + for (thread_id_t i = 0; i != thread_count; ++i) + { + running_threads.push_back(i); + threads_[i].reset(params_); + } + + for (thread_id_t i = thread_count - total_dynamic_threads_; i != thread_count; ++i) + { + dynamic_threads_[dynamic_thread_count_++] = &threads_[i]; + block_thread(i, false); + } + + thread_id_t const th = self().iteration_begin_impl(); + + thread_ = &threads_[th]; + + return th; + } + + bool iteration_end() + { + bool const finish = self().iteration_end_impl(); + + thread_ = 0; + + return finish; + } + + thread_id_t schedule(unpark_reason& reason, unsigned yield) + { + thread_id_t const th = self().schedule_impl(reason, yield); + + RL_VERIFY(threads_[th].state_ == thread_state_running); + thread_ = &threads_[th]; + + return th; + } + + RL_INLINE + unsigned rand(unsigned limit, sched_type t) + { + RL_VERIFY(limit); + return self().rand_impl(limit, t); + } + + iteration_t iteration_count() + { + return self().iteration_count_impl(); + } + + bool park_current_thread(bool is_timed, bool allow_spurious_wakeup) + { + if (is_timed) + { + timed_threads_[timed_thread_count_++] = thread_; + RL_VERIFY(timed_thread_count_ <= thread_count); + } + + if (allow_spurious_wakeup) + { + spurious_threads_[spurious_thread_count_++] = thread_; + RL_VERIFY(spurious_thread_count_ <= thread_count); + } + + block_thread(thread_->index_, true); + + return is_deadlock() ? false : true; + } + + void unpark_thread(thread_id_t th, bool do_switch = false) + { + (void)do_switch; + unblock_thread(th); + + thread_info_t& t = threads_[th]; + + //!!! store flag as to whether thread is spurious blocked in thread object + // (to eliminate iteration over all threads) + for (thread_id_t i = 0; i != spurious_thread_count_; ++i) + { + if (spurious_threads_[i] == &t) + { + for (thread_id_t j = i + 1; j != spurious_thread_count_; ++j) + spurious_threads_[j - 1] = spurious_threads_[j]; + spurious_thread_count_ -= 1; + break; + } + } + + //!!! 
store flag as to whether thread is spurious blocked in thread object + for (thread_id_t i = 0; i != timed_thread_count_; ++i) + { + if (timed_threads_[i] == &t) + { + for (thread_id_t j = i + 1; j != timed_thread_count_; ++j) + timed_threads_[j - 1] = timed_threads_[j]; + timed_thread_count_ -= 1; + break; + } + } + } + + thread_finish_result thread_finished() + { + RL_VERIFY(thread_->state_ == thread_state_running); + block_thread(thread_->index_, false); + thread_->state_ = thread_state_finished; + finished_thread_count_ += 1; + self().thread_finished_impl(); +retry: + if (finished_thread_count_ == thread_count) + { + return thread_finish_result_last; + } + else if (is_deadlock()) + { + if (dynamic_thread_count_) + { + while (dynamic_thread_count_) + { + thread_info_t* th = dynamic_threads_[--dynamic_thread_count_]; + unblock_thread(th->index_); + } + goto retry; + } + return thread_finish_result_deadlock; + } + else + { + return thread_finish_result_normal; + } + } + + thread_id_t create_thread() + { + RL_VERIFY(dynamic_thread_count_); + thread_info_t* th = dynamic_threads_[--dynamic_thread_count_]; + unblock_thread(th->index_); + return th->index_; + } + + void get_state(std::ostream& ss) + { + self().get_state_impl(ss); + } + + void set_state(std::istream& ss) + { + self().set_state_impl(ss); + } + +protected: + test_params& params_; + shared_context_t& ctx_; + thread_id_t const total_dynamic_threads_; + iteration_t iter_; + + aligned<thread_info_t> threads_ [thread_count]; + thread_info_t* thread_; + + vector<thread_id_t>::type running_threads; + thread_id_t running_threads_count; + thread_id_t finished_thread_count_; + + //!!! doesn't timed/spurious waits must belong to full scheduler? + // hyphotesis: random scheduler can ignore timed/spurious waits + // (however must detect deadlock with spurious threads) + thread_info_t* timed_threads_ [thread_count]; + thread_id_t timed_thread_count_; + + thread_info_t* spurious_threads_ [thread_count]; + thread_id_t spurious_thread_count_; + + thread_info_t* dynamic_threads_ [thread_count]; + thread_id_t dynamic_thread_count_; + + void block_thread(thread_id_t th, bool yield) + { + RL_VERIFY(th < thread_count); + thread_info_t& t = threads_[th]; + RL_VERIFY(t.state_ != thread_state_finished); + if (t.block_count_++) + return; + + for (thread_id_t i = 0; i != running_threads_count; ++i) + { + if (running_threads[i] == th) + { + running_threads.erase(running_threads.begin() + i); + running_threads_count -= 1; + t.state_ = thread_state_blocked; + self().on_thread_block(th, yield); + return; + } + } + RL_VERIFY(false); + } + + bool unblock_thread(thread_id_t th) + { + RL_VERIFY(th < thread_count); + thread_info_t& t = threads_[th]; + RL_VERIFY(t.state_ == thread_state_blocked); + if (--t.block_count_) + return false; + + running_threads.push_back(th); + running_threads_count += 1; + t.state_ = thread_state_running; + return true; + } + +private: + derived_t& self() + { + return *static_cast<derived_t*>(this); + } + + bool is_deadlock() + { + if ((0 == running_threads_count) && (0 == timed_thread_count_)) + { + self().purge_blocked_threads(); + if ((0 == running_threads_count) && (0 == timed_thread_count_)) + return true; + } + return false; + } + + void thread_finished_impl() + { + } + + void purge_blocked_threads() + { + } +}; + + +} + +#endif diff --git a/libs/relacy/relacy/signature.hpp b/libs/relacy/relacy/signature.hpp @@ -0,0 +1,84 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. 
+ * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. + */ + +#ifndef RL_SIGNATURE_HPP +#define RL_SIGNATURE_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "base.hpp" +#include "test_result.hpp" +#include "context_base.hpp" + + +namespace rl +{ + + +template<unsigned magic> +class signature +{ +public: + signature() + : magic_(magic) + { + } + + signature(signature const&) + : magic_(magic) + { + } + + ~signature() + { + check(RL_INFO); + magic_ = 0; + } + + void check(debug_info_param info) const + { + if ( + ((uintptr_t)this <= (uintptr_t)-1 - 4096) && + ((uintptr_t)this >= 4096) && + ((uintptr_t)this % sizeof(unsigned) == 0) && (magic == magic_)) + { + return; + } + else + { + fail(info); + } + } + +private: + unsigned magic_; + + struct fault_event + { + void const* addr_; + void output(std::ostream& s) const + { + s << "<" << std::hex << addr_ << std::dec << ">" + << " access to freed memory"; + } + }; + + RL_NOINLINE void fail(debug_info_param info) const + { + context& c = ctx(); + RL_HIST(fault_event) {this} RL_HIST_END(); + rl::ctx().fail_test("access to freed memory", test_result_access_to_freed_memory, info); + } +}; + + +} + +#endif diff --git a/libs/relacy/relacy/slab_allocator.hpp b/libs/relacy/relacy/slab_allocator.hpp @@ -0,0 +1,157 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. 
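+ */
+
+/* Note (not part of the original source): slab_allocator below carves
+ * 16-byte-rounded cells of sizeof(void*) + sizeof(type) out of malloc'd
+ * batches of 128. The extra leading word of a cell holds the freelist link
+ * while the cell is free and the caller's ctx pointer while it is allocated,
+ * which is how output_allocs() attributes leaked cells back to their owner:
+ *
+ *   node_t* n = alloc_.alloc(this);   // illustrative names
+ *   alloc_.free(n);                   // O(1) push back onto the freelist
+ *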
+ */ + +#ifndef RL_SLAB_ALLOCATOR_HPP +#define RL_SLAB_ALLOCATOR_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "base.hpp" + + +namespace rl +{ + + +template<typename type> +class slab_allocator : nocopy<> +{ +public: + slab_allocator() + : freelist_() + , blocks_() + , alloc_count_() + { + } + + ~slab_allocator() + { + char* pos = blocks_; + while (pos) + { + char* const next = *reinterpret_cast<char**>(pos); + ::free(pos); + pos = next; + } + } + + type* alloc(void* ctx = 0) + { + if (freelist_) + { + type* p = freelist_; + freelist_ = *reinterpret_cast<type**>(p); + alloc_count_ += 1; + *(void**)p = ctx; + type* pp = reinterpret_cast<type*>((reinterpret_cast<void**>(p) + 1)); + return pp; + } + else + { + return alloc_batch(); + } + } + + void free(type* p) + { + type** pos = reinterpret_cast<type**>((reinterpret_cast<void**>(p) - 1)); + pos[0] = freelist_; + freelist_ = reinterpret_cast<type*>(pos); + alloc_count_ -= 1; + } + + bool iteration_end() + { +#ifndef RL_GC + return alloc_count_ == 0; +#else + freelist_ = 0; + size_t elem_size = sizeof(void*) + sizeof(type); + elem_size = (elem_size + 15) & ~15; + char* pos = blocks_; + while (pos) + { + char* p = pos; + p += elem_size; + for (size_t i = 0; i != batch_size; ++i) + { + *reinterpret_cast<type**>(p) = freelist_; + freelist_ = reinterpret_cast<type*>(p); + p += elem_size; + } + pos = *reinterpret_cast<char**>(pos); + } + return true; +#endif + } + + void output_allocs(std::ostream& stream) + { + size_t elem_size = sizeof(void*) + sizeof(type); + elem_size = (elem_size + 15) & ~15; + set<void*>::type allocs; + char* pos = blocks_; + while (pos) + { + char* p = pos; + p += elem_size; + for (size_t i = 0; i != batch_size; ++i) + { + allocs.insert(p); + p += elem_size; + } + pos = *reinterpret_cast<char**>(pos); + } + set<void*>::type avail; + type* pos2 = freelist_; + while (pos2) + { + avail.insert(pos2); + pos2 = *reinterpret_cast<type**>(pos2); + } + vector<void*>::type diff; + std::set_difference(allocs.begin(), allocs.end(), avail.begin(), avail.end(), std::back_inserter(diff)); + for (size_t i = 0; i != diff.size(); ++i) + { + stream << *(void**)diff[i] << std::endl; + } + } + +private: + static size_t const batch_size = 128; + type* freelist_; + char* blocks_; + size_t alloc_count_; + + RL_NOINLINE type* alloc_batch() + { + size_t elem_size = sizeof(void*) + sizeof(type); + elem_size = (elem_size + 15) & ~15; + char* const batch = (char*)(::malloc)(elem_size * (batch_size + 1)); + if (0 == batch) + throw std::bad_alloc(); + *reinterpret_cast<char**>(batch) = blocks_; + blocks_ = batch; + char* p = batch; + p += elem_size; + for (size_t i = 0; i != batch_size; ++i) + { + *reinterpret_cast<type**>(p) = freelist_; + freelist_ = reinterpret_cast<type*>(p); + p += elem_size; + } + return alloc(); + } +}; + + +} + +#endif diff --git a/libs/relacy/relacy/stdlib/condition_variable.hpp b/libs/relacy/relacy/stdlib/condition_variable.hpp @@ -0,0 +1,372 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. 
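+ */
+
+/* Usage sketch (illustrative, not part of the original source). The wrapper
+ * classes below type-erase the user's mutex and predicate so that a single
+ * condvar_data implementation can serve any lock type. Under test the usual
+ * shape is:
+ *
+ *   rl::mutex m;
+ *   rl::condition_variable cv;
+ *   rl::var<int> ready;
+ *
+ *   // waiter
+ *   m.lock($);
+ *   while (0 == ready($))
+ *       cv.wait(m, $);
+ *   m.unlock($);
+ *
+ *   // notifier
+ *   m.lock($);
+ *   ready($) = 1;
+ *   m.unlock($);
+ *   cv.notify_one($);
+ *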
+ */ + +#ifndef RL_CONDITION_VARIABLE_HPP +#define RL_CONDITION_VARIABLE_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "../base.hpp" +#include "../context_base.hpp" +#include "../waitset.hpp" +#include "../signature.hpp" + + +namespace rl +{ + +struct mutex_wrapper +{ + virtual void lock(debug_info_param info) const = 0; + virtual void unlock(debug_info_param info) const = 0; + virtual ~mutex_wrapper() {} +}; + +template<typename mutex_t> +class mutex_wrapper_impl : public mutex_wrapper +{ +public: + mutex_wrapper_impl(mutex_t& m) + : m_(m) + { + } + +private: + mutex_t& m_; + + virtual void lock(debug_info_param info) const + { + m_.lock(info); + } + + virtual void unlock(debug_info_param info) const + { + m_.unlock(info); + } + + RL_NOCOPY(mutex_wrapper_impl); +}; + +struct pred_wrapper +{ + virtual bool exec() const = 0; + virtual ~pred_wrapper() {} +}; + +template<typename pred_t> +class pred_wrapper_impl : public pred_wrapper +{ +public: + pred_wrapper_impl(pred_t p) + : p_(p) + { + } + +private: + mutable pred_t p_; + + virtual bool exec() const + { + return p_(); + } + + RL_NOCOPY(pred_wrapper_impl); +}; + + +struct condvar_data +{ + virtual void notify_one(debug_info_param info) = 0; + virtual void notify_all(debug_info_param info) = 0; + virtual sema_wakeup_reason wait(mutex_wrapper const& lock, bool is_timed, debug_info_param info) = 0; + virtual bool wait(mutex_wrapper const& lock, pred_wrapper const& pred, bool is_timed, debug_info_param info) = 0; + virtual ~condvar_data() {} // just to calm down gcc +}; + +template<thread_id_t thread_count> +class condvar_data_impl : public condvar_data +{ +public: + condvar_data_impl(bool allow_spurious_wakeups) + { + spurious_wakeup_limit_ = 0; + if (allow_spurious_wakeups && ctx().is_random_sched()) + spurious_wakeup_limit_ = 10; + } + + ~condvar_data_impl() + { + //!!! detect destoy when there are blocked threads + } + +private: + waitset<thread_count> ws_; + signature<0xc0ffe3ad> sign_; + int spurious_wakeup_limit_; + + struct event_t + { + enum type_e + { + type_notify_one, + type_notify_all, + type_wait_enter, + type_wait_exit, + type_wait_pred_enter, + type_wait_pred_exit, + }; + + condvar_data_impl const* var_addr_; + type_e type_; + thread_id_t thread_count_; + unpark_reason reason_; + + void output(std::ostream& s) const + { + s << "<" << std::hex << var_addr_ << std::dec << "> cond_var: "; + switch (type_) + { + case type_notify_one: + s << "notify one total_blocked=" << thread_count_ << " unblocked=" << (thread_count_ ? 1 : 0); + break; + case type_notify_all: + s << "notify all unblocked=" << thread_count_; + break; + case type_wait_enter: s << "wait enter"; break; + case type_wait_exit: + s << "wait exit"; + if (unpark_reason_normal == reason_) + s << " due to notified"; + else if (unpark_reason_timeout == reason_) + s << " due to timeout"; + else if (unpark_reason_spurious == reason_) + s << " spuriously"; + break; + case type_wait_pred_enter: s << "wait pred enter"; break; + case type_wait_pred_exit: s << "wait pred exit"; break; + } + } + }; + + virtual void notify_one(debug_info_param info) + { + context& c = ctx(); + //??? do I need this scheduler call? + c.sched(); + sign_.check(info); + RL_HIST(event_t) {this, event_t::type_notify_one, ws_.size()} RL_HIST_END(); + ws_.unpark_one(c, info); + } + + virtual void notify_all(debug_info_param info) + { + context& c = ctx(); + //??? do I need this scheduler call? 
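+        // (as in notify_one above: sched() makes the notification an
+        // explicit scheduling point, letting the checker interleave other
+        // threads between the caller's preceding actions and the wakeup)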
+ c.sched(); + sign_.check(info); + RL_HIST(event_t) {this, event_t::type_notify_all, ws_.size()} RL_HIST_END(); + ws_.unpark_all(c, info); + } + + virtual sema_wakeup_reason wait(mutex_wrapper const& lock, bool is_timed, debug_info_param info) + { + //!!! detect whether mutex is the same + context& c = ctx(); + sign_.check(info); + RL_HIST(event_t) {this, event_t::type_wait_enter} RL_HIST_END(); + lock.unlock(info); + sign_.check(info); + bool allow_spurious_wakeup = (spurious_wakeup_limit_ > 0); + unpark_reason reason = ws_.park_current(c, is_timed, allow_spurious_wakeup, false, info); + if (reason == unpark_reason_spurious) + spurious_wakeup_limit_ -= 1; + RL_HIST(event_t) {this, event_t::type_wait_exit, 0, reason} RL_HIST_END(); + lock.lock(info); + sign_.check(info); + if (reason == unpark_reason_normal) + return sema_wakeup_reason_success; + else if (reason == unpark_reason_spurious) + return sema_wakeup_reason_spurious; + else //if (reason == unpark_reason_timeout) + return sema_wakeup_reason_timeout; + } + + virtual bool wait(mutex_wrapper const& lock, pred_wrapper const& pred, bool is_timed, debug_info_param info) + { + context& c = ctx(); + sign_.check(info); + RL_HIST(event_t) {this, event_t::type_wait_pred_enter} RL_HIST_END(); + while (!pred.exec()) + { + sema_wakeup_reason reason = wait(lock, is_timed, info); + if (reason == sema_wakeup_reason_timeout) + { + RL_HIST(event_t) {this, event_t::type_wait_pred_exit} RL_HIST_END(); + return pred.exec(); + } + } + RL_HIST(event_t) {this, event_t::type_wait_pred_exit} RL_HIST_END(); + return true; + } +}; + + +template<typename tag_t> +class condvar +{ +public: + condvar() + : impl_() + { + } + + condvar(condvar const&) + : impl_() + { + } + + condvar& operator = (condvar const&) + { + return *this; + } + + ~condvar() + { + } + + void init(bool allow_spurious_wakeups, debug_info_param info) + { + context& c = ctx(); + RL_ASSERT_IMPL(0 == impl_, test_result_double_initialization_of_condvar, "", info); + sign_.check(info); + impl_ = c.condvar_ctor(allow_spurious_wakeups); + } + + void deinit(debug_info_param info) + { + context& c = ctx(); + check(info); + c.condvar_dtor(impl_); + impl_ = 0; + } + + void notify_one(debug_info_param info) + { + check(info); + impl_->notify_one(info); + } + + void notify_all(debug_info_param info) + { + check(info); + impl_->notify_all(info); + } + + template<typename lock_t> + sema_wakeup_reason wait(lock_t& lock, bool is_timed, debug_info_param info) + { + check(info); + mutex_wrapper_impl<lock_t> w (lock); + return impl_->wait(w, is_timed, info); + } + + template<typename lock_t, typename pred_t> + bool wait(mutex_wrapper const& lock, pred_wrapper const& pred, bool is_timed, debug_info_param info) + { + check(info); + return impl_->wait(mutex_wrapper_impl<lock_t>(lock), pred_wrapper_impl<pred_t>(pred), is_timed, info); + } + +private: + condvar_data* impl_; + signature<0xbadc0ffe> sign_; + + void check(debug_info_param info) + { + RL_ASSERT_IMPL(impl_, test_result_usage_of_non_initialized_condvar, "", info); + sign_.check(info); + } +}; + + + +template<typename tag_t> +class condition_variable_std : condvar<tag_t> +{ +public: + condition_variable_std() + { + condvar<tag_t>::init(true, $); + } + + ~condition_variable_std() + { + condvar<tag_t>::deinit($); + } + + void notify_one(debug_info_param info) + { + condvar<tag_t>::notify_one(info); + } + + void notify_all(debug_info_param info) + { + condvar<tag_t>::notify_all(info); + } + + template<typename lock_t> + void wait(lock_t& lock, debug_info_param 
info) + { + condvar<tag_t>::wait(lock, false, info); + } + + template<typename lock_t, typename pred_t> + void wait(lock_t& lock, pred_t pred, debug_info_param info) + { + condvar<tag_t>::wait(lock, pred, false, info); + } + + template<typename lock_t, typename abs_time_t> + bool wait_until(lock_t& lock, abs_time_t const&, debug_info_param info) + { + return condvar<tag_t>::wait(lock, true, info); + } + + template<typename lock_t, typename abs_time_t, typename pred_t> + bool wait_until(lock_t& lock, abs_time_t const&, pred_t pred, debug_info_param info) + { + return condvar<tag_t>::wait(lock, pred, true, info); + } + + template<typename lock_t, typename rel_time_t> + bool wait_for(lock_t& lock, rel_time_t const&, debug_info_param info) + { + sema_wakeup_reason reason = condvar<tag_t>::wait(lock, true, info); + return reason == sema_wakeup_reason_success; + } + + template<typename lock_t, typename rel_time_t, typename pred_t> + bool wait_for(lock_t& lock, rel_time_t const&, pred_t pred, debug_info_param info) + { + return condvar<tag_t>::wait(lock, pred, true, info); + } + + RL_NOCOPY(condition_variable_std); +}; + + +struct condvar_tag_std; +typedef condition_variable_std<condvar_tag_std> condition_variable; +struct condvar_tag_std_any; +typedef condition_variable_std<condvar_tag_std_any> condition_variable_any; + +} + +#endif diff --git a/libs/relacy/relacy/stdlib/event.hpp b/libs/relacy/relacy/stdlib/event.hpp @@ -0,0 +1,386 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. + */ + +#ifndef RL_EVENT_HPP +#define RL_EVENT_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "../base.hpp" +#include "../context_base.hpp" +#include "../sync_var.hpp" +#include "../waitset.hpp" +#include "semaphore.hpp" + + +namespace rl +{ + + +struct event_data +{ + virtual void set(debug_info_param info) = 0; + virtual void reset(debug_info_param info) = 0; + virtual void pulse(debug_info_param info) = 0; + virtual sema_wakeup_reason wait(bool try_wait, bool is_timed, debug_info_param info) = 0; + virtual bool is_signaled(debug_info_param info) = 0; + virtual void memory_acquire(debug_info_param info) = 0; + virtual void* prepare_wait(debug_info_param info) = 0; + virtual ~event_data() {} // just to calm down gcc +}; + + + + +template<thread_id_t thread_count> +class event_data_impl : public event_data +{ +public: + event_data_impl(bool manual_reset, bool initial_state) + : manual_reset_(manual_reset) + , state_(initial_state) + { + } + + ~event_data_impl() + { + //!!! 
detect destuction with waiters + } + +private: + signature<0xdada1234> sign_; + bool const manual_reset_; + bool state_; + waitset<thread_count> ws_; + sync_var<thread_count> sync_; + + struct state_event + { + enum type + { + type_set, + type_reset, + type_pulse, + }; + + event_data_impl* addr_; + type type_; + bool initial_state_; + bool final_state_; + thread_id_t unblocked_; + + void output(std::ostream& s) const + { + s << "<" << std::hex << addr_ << std::dec << "> event: "; + if (type_set == type_) + s << "set "; + else if (type_reset == type_) + s << "reset "; + else + s << "pulse "; + s << "initial_state=" << initial_state_ + << " final_state=" << final_state_; + if (type_reset != type_) + s << " unblocked=" << unblocked_; + } + + }; + + virtual void set(debug_info_param info) + { + context& c = ctx(); + c.sched(); + sign_.check(info); + + bool initial_state = state_; + thread_id_t unblocked = 0; + + if (state_) + { + //!!! probably can break if a thread waits in wfmo + RL_VERIFY(false == ws_); + } + else + { + sync_.release(c.threadx_); + state_ = true; + + if (manual_reset_) + { + unblocked = ws_.unpark_all(c, info); + } + else + { + if (ws_.unpark_one(c, info)) + unblocked = 1; + } + } + + RL_HIST(state_event) {this, state_event::type_set, initial_state, state_, unblocked} RL_HIST_END(); + } + + virtual void reset(debug_info_param info) + { + context& c = ctx(); + c.sched(); + sign_.check(info); + + bool initial_state = state_; + + if (state_) + { + RL_VERIFY(false == ws_); + sync_.release(c.threadx_); + state_ = false; + } + + RL_HIST(state_event) {this, state_event::type_reset, initial_state, state_, 0} RL_HIST_END(); + } + + virtual void pulse(debug_info_param info) + { + context& c = ctx(); + c.sched(); + sign_.check(info); + + //??? should I model nasty caveat described in MSDN + thread_id_t unblocked = 0; + + if (state_) + { + //!!! 
probably can break if a thread waits in wfmo + RL_VERIFY(false == ws_); + } + else + { + sync_.release(c.threadx_); + state_ = true; + unblocked = ws_.unpark_all(c, info); + state_ = false; + } + + RL_HIST(state_event) {this, state_event::type_pulse, state_, state_, unblocked} RL_HIST_END(); + } + + struct wait_event + { + event_data_impl* addr_; + bool try_wait_; + bool is_timed_; + bool initial_state_; + bool final_state_; + sema_wakeup_reason reason_; + + void output(std::ostream& s) const + { + s << "<" << std::hex << addr_ << std::dec << "> event: "; + if (try_wait_) + s << "try_wait "; + else if (is_timed_) + s << "timed wait "; + else + s << "wait "; + + if (reason_ == sema_wakeup_reason_success) + s << "succeeded "; + else if (reason_ == sema_wakeup_reason_failed) + s << "failed "; + else if (reason_ == sema_wakeup_reason_timeout) + s << "timed out "; + else if (reason_ == sema_wakeup_reason_spurious) + s << "spuriously failed "; + + s << "initial_state=" << initial_state_ + << " final_state=" << final_state_; + } + }; + + virtual sema_wakeup_reason wait(bool try_wait, bool is_timed, debug_info_param info) + { + context& c = ctx(); + c.sched(); + sign_.check(info); + + bool initial_state = state_; + sema_wakeup_reason reason = sema_wakeup_reason_success; + + for (;;) + { + if (state_) + { + if (manual_reset_) + { + sync_.acquire(c.threadx_); + } + else + { + state_ = false; + sync_.acq_rel(c.threadx_); + } + reason = sema_wakeup_reason_success; + break; + } + + if (try_wait) + { + sync_.acquire(c.threadx_); + reason = sema_wakeup_reason_failed; + break; + } + + unpark_reason wr = ws_.park_current(c, is_timed, false, true, info); + initial_state = state_; + if (unpark_reason_timeout == wr) + { + sync_.acquire(c.threadx_); + reason = sema_wakeup_reason_timeout; + break; + } + else if (unpark_reason_normal == wr) + { + RL_VERIFY(state_ == true); + if (manual_reset_) + { + sync_.acquire(c.threadx_); + } + else + { + state_ = false; + sync_.acq_rel(c.threadx_); + } + c.switch_back(info); + reason = sema_wakeup_reason_success; + break; + } + RL_VERIFY(false); + } + + RL_HIST(wait_event) {this, try_wait, is_timed, initial_state, state_, reason} RL_HIST_END(); + return reason; + } + + virtual bool is_signaled(debug_info_param info) + { + (void)info; + return state_; + } + + virtual void memory_acquire(debug_info_param info) + { + (void)info; + sync_.acquire(ctx().threadx_); + } + + virtual void* prepare_wait(debug_info_param info) + { + (void)info; + return &ws_; + } + + RL_NOCOPY(event_data_impl); +}; + + + +class generic_event : public win_waitable_object +{ +public: + generic_event() + : impl_() + { + } + + generic_event(generic_event const&) + : impl_() + { + } + + generic_event& operator = (generic_event const&) + { + return *this; + } + + void init(bool manual_reset, bool initial_state, debug_info_param info) + { + context& c = ctx(); + RL_ASSERT_IMPL(0 == impl_, test_result_double_initialization_of_event, "", info); + sign_.check(info); + impl_ = c.event_ctor(manual_reset, initial_state); + } + + void deinit(debug_info_param info) + { + context& c = ctx(); + check(info); + c.event_dtor(impl_); + impl_ = 0; + } + + void set(debug_info_param info) + { + check(info); + impl_->set(info); + } + + void reset(debug_info_param info) + { + check(info); + impl_->reset(info); + } + + void pulse(debug_info_param info) + { + check(info); + impl_->pulse(info); + } + + virtual sema_wakeup_reason wait(bool try_wait, bool is_timed, debug_info_param info) + { + check(info); + return 
impl_->wait(try_wait, is_timed, info); + } + + virtual bool signal(debug_info_param info) + { + set(info); + return true; + } + +private: + event_data* impl_; + signature<0x3390eeaa> sign_; + + event_data* check(debug_info_param info) + { + RL_ASSERT_IMPL(impl_, test_result_usage_of_non_initialized_event, "", info); + sign_.check(info); + return impl_; + } + + virtual bool is_signaled(debug_info_param info) + { + return check(info)->is_signaled(info); + } + + virtual void memory_acquire(debug_info_param info) + { + check(info)->memory_acquire(info); + } + + virtual void* prepare_wait(debug_info_param info) + { + return check(info)->prepare_wait(info); + } +}; + + +} + +#endif diff --git a/libs/relacy/relacy/stdlib/mutex.hpp b/libs/relacy/relacy/stdlib/mutex.hpp @@ -0,0 +1,674 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. + */ + +#ifndef RL_MUTEX_HPP +#define RL_MUTEX_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "../base.hpp" +#include "../context.hpp" +#include "../thread.hpp" +#include "../atomic.hpp" +#include "../waitset.hpp" +#include "../signature.hpp" +#include "../sync_var.hpp" +#include "../foreach.hpp" +#include "semaphore.hpp" + + + +namespace rl +{ + +struct generic_mutex_data : nocopy<> +{ + virtual bool lock_exclusive(bool is_timed, debug_info_param info) = 0; + virtual bool try_lock_exclusive(debug_info_param info) = 0; + virtual void unlock_exclusive(debug_info_param info) = 0; + virtual void lock_shared(debug_info_param info) = 0; + virtual bool try_lock_shared(debug_info_param info) = 0; + virtual void unlock_shared(debug_info_param info) = 0; + virtual void unlock_exclusive_or_shared(debug_info_param info) = 0; + virtual bool is_signaled(debug_info_param info) = 0; + virtual void memory_acquire(debug_info_param info) = 0; + virtual void* prepare_wait(debug_info_param info) = 0; + virtual ~generic_mutex_data() {} // just to calm down gcc +}; + + +template<thread_id_t thread_count> +class generic_mutex_data_impl : public generic_mutex_data +{ +public: + struct event_t + { + enum type_e + { + type_lock, + type_unlock, + type_recursive_lock, + type_recursive_unlock, + type_failed_try_lock, + type_spuriously_failed_try_lock, + type_lock_shared, + type_unlock_shared, + type_recursive_lock_shared, + type_recursive_unlock_shared, + type_failed_try_lock_shared, + type_spuriously_failed_try_lock_shared, + type_wait, + type_destroying_owned_mutex, + }; + + generic_mutex_data_impl const* var_addr_; + type_e type_; + + void output(std::ostream& s) const + { + s << "<" << std::hex << var_addr_ << std::dec << "> mutex: "; + switch (type_) + { + case type_lock: s << "exclusive lock"; break; + case type_unlock: s << "exclusive unlock"; break; + case type_recursive_lock: s << "recursive exclusive lock"; break; + case type_recursive_unlock: s << "recursive exclusive unlock"; break; + case type_failed_try_lock: s << "failed exclusive try lock"; break; + case type_spuriously_failed_try_lock: s << "spuriously failed exclusive try lock"; break; + case type_lock_shared: s << "shared lock"; break; + case type_unlock_shared: s << "shared unlock"; break; + case type_recursive_lock_shared: s << "recursive shared lock"; break; + case 
type_recursive_unlock_shared: s << "recursive shared unlock"; break; + case type_failed_try_lock_shared: s << "failed shared try lock"; break; + case type_spuriously_failed_try_lock_shared: s << "spuriously failed shared try lock"; break; + case type_wait: s << "blocking"; break; + case type_destroying_owned_mutex: s << "destroying owned mutex"; break; + } + } + }; + + generic_mutex_data_impl(bool is_rw, bool is_exclusive_recursive, bool is_shared_recursive, bool failing_try_lock) + : is_rw_(is_rw) + , is_exclusive_recursive_(is_exclusive_recursive) + , is_shared_recursive_(is_shared_recursive) + , failing_try_lock_(failing_try_lock) + , exclusive_owner_(state_free) + , exclusive_recursion_count_(0) + , shared_lock_count_(0) + , try_lock_failed_() + { + context& c = ctx(); + (void)c; + RL_VERIFY(false == c.invariant_executing); + foreach<thread_count>(shared_owner_, &assign_zero); + } + + ~generic_mutex_data_impl() + { + context& c = ctx(); + RL_VERIFY(false == c.invariant_executing); + if (exclusive_owner_ != state_free + || exclusive_waitset_ + || shared_waitset_) + { + debug_info info = $; + RL_HIST(event_t) {this, event_t::type_destroying_owned_mutex} RL_HIST_END(); + RL_ASSERT_IMPL(false, test_result_destroying_owned_mutex, "", $); + } + } + + virtual bool lock_exclusive(bool is_timed, debug_info_param info) + { + context& c = ctx(); + c.sched(); + sign_.check(info); + RL_VERIFY(false == c.invariant_executing); + + thread_id_t const my_id = c.threadx_->index_; + + if (exclusive_owner_ == state_shared && shared_owner_[my_id]) + { + RL_HIST(event_t) {this, event_t::type_lock} RL_HIST_END(); + RL_ASSERT_IMPL(false, test_result_mutex_read_to_write_upgrade, "", info); + } + + if (exclusive_owner_ == my_id) + { + RL_HIST(event_t) {this, event_t::type_recursive_lock} RL_HIST_END(); + if (is_exclusive_recursive_) + { + exclusive_recursion_count_ += 1; + return true; + } + else + { + RL_ASSERT_IMPL(false, test_result_recursion_on_nonrecursive_mutex, "", info); + } + } + + for (;;) + { + if (exclusive_owner_ == state_free) + { + RL_VERIFY(exclusive_recursion_count_ == 0); + //!!! in some implementation here must be acq_rel + sync_.acquire(c.threadx_); + exclusive_recursion_count_ = 1; + exclusive_owner_ = my_id; + RL_HIST(event_t) {this, event_t::type_lock} RL_HIST_END(); + return true; + } + else + { + RL_VERIFY(my_id != exclusive_owner_); + RL_HIST(event_t) {this, event_t::type_wait} RL_HIST_END(); + unpark_reason reason = exclusive_waitset_.park_current(c, is_timed, false, false, info); + RL_VERIFY(reason != unpark_reason_spurious); + if (reason == unpark_reason_timeout) + { + sync_.acquire(c.threadx_); + return false; + } + } + + //??? c.sched(); + //sign_.check(info); + } + } + + virtual bool try_lock_exclusive(debug_info_param info) + { + context& c = ctx(); + c.sched(); + sign_.check(info); + RL_VERIFY(false == c.invariant_executing); + + thread_id_t const my_id = c.threadx_->index_; + + if (exclusive_owner_ == state_shared && shared_owner_[my_id]) + { + RL_HIST(event_t) {this, event_t::type_lock} RL_HIST_END(); + RL_ASSERT_IMPL(false, test_result_mutex_read_to_write_upgrade, "", info); + } + + if (exclusive_owner_ == my_id) + { + RL_HIST(event_t) {this, event_t::type_recursive_lock} RL_HIST_END(); + if (is_exclusive_recursive_) + { + exclusive_recursion_count_ += 1; + return true; + } + else + { + RL_ASSERT_IMPL(false, test_result_recursion_on_nonrecursive_mutex, "", info); + } + } + + if (exclusive_owner_ == state_free) + { + RL_VERIFY(exclusive_recursion_count_ == 0); + //!!! 
probability rand + if (true == failing_try_lock_ + && false == try_lock_failed_ + && c.rand(2, sched_type_user)) + { + try_lock_failed_ = true; + RL_HIST(event_t) {this, event_t::type_spuriously_failed_try_lock} RL_HIST_END(); + return false; + } + else + { + sync_.acquire(c.threadx_); + exclusive_recursion_count_ = 1; + exclusive_owner_ = my_id; + RL_HIST(event_t) {this, event_t::type_lock} RL_HIST_END(); + return true; + } + } + else + { + //!!! in some implementation here must be acquire + //sync_.acquire(c.threadx_); + + RL_VERIFY(my_id != exclusive_owner_); + RL_HIST(event_t) {this, event_t::type_failed_try_lock} RL_HIST_END(); + return false; + } + } + + virtual void unlock_exclusive(debug_info_param info) + { + context& c = ctx(); + c.sched(); + sign_.check(info); + RL_VERIFY(false == c.invariant_executing); + + thread_id_t const my_id = c.threadx_->index_; + + if (exclusive_owner_ != my_id) + { + RL_HIST(event_t) {this, event_t::type_unlock} RL_HIST_END(); + RL_ASSERT_IMPL(false, test_result_unlocking_mutex_wo_ownership, "", info); + } + + exclusive_recursion_count_ -= 1; + if (exclusive_recursion_count_) + { + RL_VERIFY(is_exclusive_recursive_); + RL_HIST(event_t) {this, event_t::type_recursive_unlock} RL_HIST_END(); + return; + } + + sync_.release(c.threadx_); + exclusive_owner_ = state_free; + RL_VERIFY(exclusive_recursion_count_ == 0); + + if (false == exclusive_waitset_.unpark_one(c, info)) + shared_waitset_.unpark_all(c, info); + + RL_HIST(event_t) {this, event_t::type_unlock} RL_HIST_END(); + } + + virtual void lock_shared(debug_info_param info) + { + RL_VERIFY(is_rw_); + context& c = ctx(); + c.sched(); + sign_.check(info); + RL_VERIFY(false == c.invariant_executing); + + thread_id_t const my_id = c.threadx_->index_; + + if (exclusive_owner_ == my_id) + { + RL_HIST(event_t) {this, event_t::type_lock_shared} RL_HIST_END(); + RL_ASSERT_IMPL(false, test_result_mutex_write_to_read_upgrade, "", info); + } + + if (exclusive_owner_ == state_shared && shared_owner_[my_id]) + { + RL_HIST(event_t) {this, event_t::type_recursive_lock_shared} RL_HIST_END(); + if (is_shared_recursive_) + { + shared_owner_[my_id] += 1; + shared_lock_count_ += 1; + return; + } + else + { + RL_ASSERT_IMPL(false, test_result_recursion_on_nonrecursive_mutex, "", info); + } + } + + for (;;) + { + if ((exclusive_owner_ == state_free) + || (exclusive_owner_ == state_shared + && false == exclusive_waitset_)) + { + sync_.acquire(c.threadx_); + shared_owner_[my_id] += 1; + shared_lock_count_ += 1; + exclusive_owner_ = state_shared; + RL_HIST(event_t) {this, event_t::type_lock_shared} RL_HIST_END(); + break; + } + else + { + RL_VERIFY(my_id != exclusive_owner_); + RL_HIST(event_t) {this, event_t::type_wait} RL_HIST_END(); + shared_waitset_.park_current(c, false, false, false, info); + } + + //??? 
c.sched(); + //sign_.check(info); + } + } + + virtual bool try_lock_shared(debug_info_param info) + { + RL_VERIFY(is_rw_); + context& c = ctx(); + c.sched(); + sign_.check(info); + RL_VERIFY(false == c.invariant_executing); + + thread_id_t const my_id = c.threadx_->index_; + + if (exclusive_owner_ == my_id) + { + RL_HIST(event_t) {this, event_t::type_lock_shared} RL_HIST_END(); + RL_ASSERT_IMPL(false, test_result_mutex_write_to_read_upgrade, "", info); + } + + if (exclusive_owner_ == state_shared && shared_owner_[my_id]) + { + RL_HIST(event_t) {this, event_t::type_recursive_lock_shared} RL_HIST_END(); + if (is_shared_recursive_) + { + shared_owner_[my_id] += 1; + shared_lock_count_ += 1; + return true; + } + else + { + RL_ASSERT_IMPL(false, test_result_recursion_on_nonrecursive_mutex, "", info); + } + } + + if ((exclusive_owner_ == state_free) + || (exclusive_owner_ == state_shared + && false == exclusive_waitset_)) + { + //!!! probability rand + if (true == failing_try_lock_ + && false == try_lock_failed_ + && c.rand(2, sched_type_user)) + { + try_lock_failed_ = true; + RL_HIST(event_t) {this, event_t::type_spuriously_failed_try_lock_shared} RL_HIST_END(); + return false; + } + else + { + sync_.acquire(c.threadx_); + shared_owner_[my_id] += 1; + shared_lock_count_ += 1; + exclusive_owner_ = state_shared; + RL_HIST(event_t) {this, event_t::type_lock_shared} RL_HIST_END(); + return true; + } + } + else + { + RL_VERIFY(my_id != exclusive_owner_); + RL_HIST(event_t) {this, event_t::type_failed_try_lock_shared} RL_HIST_END(); + return false; + } + } + + virtual void unlock_shared(debug_info_param info) + { + RL_VERIFY(is_rw_); + context& c = ctx(); + c.sched(); + sign_.check(info); + RL_VERIFY(false == c.invariant_executing); + + thread_id_t const my_id = c.threadx_->index_; + + if (exclusive_owner_ != state_shared || 0 == shared_owner_[my_id]) + { + RL_HIST(event_t) {this, event_t::type_unlock_shared} RL_HIST_END(); + RL_ASSERT_IMPL(false, test_result_unlocking_mutex_wo_ownership, "", info); + } + + RL_VERIFY(shared_lock_count_); + shared_owner_[my_id] -= 1; + shared_lock_count_ -= 1; + if (shared_lock_count_ != 0) + { + if (shared_owner_[my_id]) + { + RL_VERIFY(is_shared_recursive_); + RL_HIST(event_t) {this, event_t::type_recursive_unlock_shared} RL_HIST_END(); + } + else + { + sync_.release(c.threadx_); + RL_HIST(event_t) {this, event_t::type_unlock_shared} RL_HIST_END(); + } + return; + } + + sync_.release(c.threadx_); + exclusive_owner_ = state_free; + + exclusive_waitset_.unpark_one(c, info); + + RL_HIST(event_t) {this, event_t::type_unlock_shared} RL_HIST_END(); + } + + virtual void unlock_exclusive_or_shared(debug_info_param info) + { + if (exclusive_owner_ == ctx().threadx_->index_) + unlock_exclusive(info); + else + unlock_shared(info); + } + + virtual bool is_signaled(debug_info_param info) + { + (void)info; + return (exclusive_owner_ == state_free); + } + + virtual void memory_acquire(debug_info_param info) + { + (void)info; + sync_.acquire(ctx().threadx_); + } + + virtual void* prepare_wait(debug_info_param info) + { + (void)info; + return &exclusive_waitset_; + } + +private: + static thread_id_t const state_shared = (thread_id_t)-1; + static thread_id_t const state_free = (thread_id_t)-2; + + signature<0xbabaf1f1> sign_; + bool is_rw_; + bool is_exclusive_recursive_; + bool is_shared_recursive_; + bool failing_try_lock_; + sync_var<thread_count> sync_; + thread_id_t exclusive_owner_; + unsigned exclusive_recursion_count_; + waitset<thread_count> exclusive_waitset_; + 
waitset<thread_count> shared_waitset_; + timestamp_t shared_owner_ [thread_count]; + unsigned shared_lock_count_; + bool try_lock_failed_; + + RL_NOCOPY(generic_mutex_data_impl); +}; + + + + +template<typename type> +class generic_mutex : public win_waitable_object +{ +public: + generic_mutex() + : impl_() + { + } + + generic_mutex(generic_mutex const&) + : impl_() + { + } + + generic_mutex& operator = (generic_mutex const&) + { + return *this; + } + + ~generic_mutex() + { + } + + void init(bool is_rw, bool is_exclusive_recursive, bool is_shared_recursive, bool failing_try_lock, debug_info_param info) + { + context& c = ctx(); + RL_ASSERT_IMPL(0 == impl_, test_result_double_initialization_of_mutex, "", info); + sign_.check(info); + impl_ = c.mutex_ctor(is_rw, is_exclusive_recursive, is_shared_recursive, failing_try_lock); + } + + void deinit(debug_info_param info) + { + context& c = ctx(); + check(info); + c.mutex_dtor(impl_); + impl_ = 0; + } + + void lock(debug_info_param info) + { + lock_exclusive(info); + } + + bool lock_exclusive_timed(debug_info_param info) + { + return check(info)->lock_exclusive(true, info); + } + + void unlock(debug_info_param info) + { + unlock_exclusive(info); + } + + void lock_exclusive(debug_info_param info) + { + check(info)->lock_exclusive(false, info); + } + + bool try_lock_exclusive(debug_info_param info) + { + return check(info)->try_lock_exclusive(info); + } + + void unlock_exclusive(debug_info_param info) + { + check(info)->unlock_exclusive(info); + } + + void lock_shared(debug_info_param info) + { + check(info)->lock_shared(info); + } + + bool try_lock_shared(debug_info_param info) + { + return check(info)->try_lock_shared(info); + } + + void unlock_shared(debug_info_param info) + { + check(info)->unlock_shared(info); + } + + void unlock_exclusive_or_shared(debug_info_param info) + { + check(info)->unlock_exclusive_or_shared(info); + } + +private: + generic_mutex_data* impl_; + signature<0x6A6cB03A> sign_; + + generic_mutex_data* check(debug_info_param info) + { + RL_ASSERT_IMPL(impl_, test_result_usage_of_non_initialized_mutex, "", info); + sign_.check(info); + return impl_; + } + + virtual sema_wakeup_reason wait(bool try_wait, bool is_timed, debug_info_param info) + { + if (try_wait) + { + if (check(info)->try_lock_exclusive(info)) + return sema_wakeup_reason_success; + else + return sema_wakeup_reason_failed; + } + else + { + if (check(info)->lock_exclusive(is_timed, info)) + return sema_wakeup_reason_success; + else + return sema_wakeup_reason_timeout; + + } + } + + virtual bool signal(debug_info_param info) + { + check(info)->unlock_exclusive(info); + return true; + } + + virtual bool is_signaled(debug_info_param info) + { + return check(info)->is_signaled(info); + } + + virtual void memory_acquire(debug_info_param info) + { + check(info)->memory_acquire(info); + } + + virtual void* prepare_wait(debug_info_param info) + { + return check(info)->prepare_wait(info); + } +}; + + + + +template<typename tag, bool is_recursive> +class std_generic_mutex : generic_mutex<tag>, nocopy<> +{ +public: + std_generic_mutex() + { + generic_mutex<tag>::init(false, is_recursive, false, true, $); + } + + ~std_generic_mutex() + { + generic_mutex<tag>::deinit($); + } + + void lock(debug_info_param info) + { + generic_mutex<tag>::lock_exclusive(info); + } + + bool try_lock(debug_info_param info) + { + return generic_mutex<tag>::try_lock_exclusive(info); + } + + void unlock(debug_info_param info) + { + generic_mutex<tag>::unlock_exclusive(info); + } +}; + + +struct 
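+// [editor's sketch] Hypothetical example of exercising the std-style mutex
+// defined just below in a relacy test; assumes rl::simulate (context.hpp),
+// rl::var (the instrumented plain variable) and rl::test_suite
+// (test_suite.hpp):
+//
+//     struct mutex_test : rl::test_suite<mutex_test, 2>
+//     {
+//         rl::mutex m;
+//         rl::var<int> x;
+//         void thread(unsigned /*index*/)
+//         {
+//             m.lock($);          // $ records file/line for the history
+//             x($) = x($) + 1;    // unlocked access would be flagged as a race
+//             m.unlock($);
+//         }
+//     };
+//     // explores the interleavings: rl::simulate<mutex_test>();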
mutex_tag_std; +typedef std_generic_mutex<mutex_tag_std, false> mutex; + +struct mutex_tag_std_recursive; +typedef std_generic_mutex<mutex_tag_std_recursive, true> recursive_mutex; + + +} + +#endif diff --git a/libs/relacy/relacy/stdlib/pthread.hpp b/libs/relacy/relacy/stdlib/pthread.hpp @@ -0,0 +1,588 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. + */ + +#ifndef RL_PTHREAD_HPP +#define RL_PTHREAD_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "mutex.hpp" +#include "condition_variable.hpp" +#include "semaphore.hpp" + + +namespace rl +{ + +enum RL_POSIX_ERROR_CODE +{ + RL_SUCCESS, + RL_EINVAL, + RL_ETIMEDOUT, + RL_EBUSY, + RL_EINTR, + RL_EAGAIN, + RL_EWOULDBLOCK, +}; + + +inline void rl_sched_yield(debug_info_param info) +{ + yield(1, info); +} + + +typedef win_waitable_object* rl_pthread_t; +typedef void* rl_pthread_attr_t; + +inline int rl_pthread_create(rl_pthread_t* th, rl_pthread_attr_t* attr, void* (*func) (void*), void* arg, debug_info_param info) +{ + (void)attr; + (void)info;//!!! + RL_VERIFY(th && func); + th[0] = ctx().create_thread(func, arg); + return 0; +} + +inline int rl_pthread_join(rl_pthread_t th, void** res, debug_info_param info) +{ + RL_VERIFY(th && res); + res[0] = 0; //!!! + th->wait(false, false, info); + return 0; +} + + + + +struct sem_tag_pthread; +typedef semaphore<sem_tag_pthread> rl_sem_t; + +inline int rl_sem_init(rl_sem_t* sema, int /*pshared*/, unsigned int initial_count, debug_info_param info) +{ + RL_VERIFY(initial_count >= 0); + sema->init(true, initial_count, INT_MAX, info); + return 0; +} + +inline int rl_sem_destroy(rl_sem_t* sema, debug_info_param info) +{ + sema->deinit(info); + return 0; +} + +inline int rl_sem_wait(rl_sem_t* sema, debug_info_param info) +{ + sema_wakeup_reason reason = sema->wait(false, false, info); + if (reason == sema_wakeup_reason_success) + return 0; + if (reason == sema_wakeup_reason_spurious) + { + set_errno(RL_EINTR); + return -1; + } + RL_VERIFY(false); + return -1; +} + +inline int rl_sem_trywait(rl_sem_t* sema, debug_info_param info) +{ + sema_wakeup_reason reason = sema->wait(true, false, info); + if (sema_wakeup_reason_success == reason) + return 0; + if (sema_wakeup_reason_failed == reason) + { + set_errno(RL_EAGAIN); + return -1; + } + if (sema_wakeup_reason_spurious == reason) + { + set_errno(RL_EINTR); + return -1; + } + RL_VERIFY(false); + return -1; +} + +inline int rl_sem_post(rl_sem_t* sema, debug_info_param info) +{ + unsigned prev_cout = 0; + bool result = sema->post(1, prev_cout, info); + RL_VERIFY(result); + (void)result; + return 0; +} + +inline int rl_sem_getvalue(rl_sem_t* sema, int* value, debug_info_param info) +{ + RL_VERIFY(value); + if (value) + value[0] = sema->get_value(info); + return 0; +} + + + + +struct mutex_tag_pthread_mtx; +typedef generic_mutex<mutex_tag_pthread_mtx> rl_pthread_mutex_t; + +struct rl_pthread_mutexattr_t +{ + bool is_recursive_; +}; + +enum RL_PTHREAD_MUTEX_TYPE +{ + RL_PTHREAD_MUTEX_NORMAL, + RL_PTHREAD_MUTEX_ERRORCHECK, + RL_PTHREAD_MUTEX_RECURSIVE, + RL_PTHREAD_MUTEX_DEFAULT, +}; + +inline int rl_pthread_mutexattr_init(rl_pthread_mutexattr_t* attr, debug_info_param info) +{ + (void)info; + if (0 == attr) + return 
RL_EINVAL; + attr->is_recursive_ = false; + return 0; +} + +inline int rl_pthread_mutexattr_destroy(rl_pthread_mutexattr_t* attr, debug_info_param info) +{ + (void)info; + if (0 == attr) + return RL_EINVAL; + return 0; +} + +inline int rl_pthread_mutexattr_settype(rl_pthread_mutexattr_t* attr, int type, debug_info_param info) +{ + (void)info; + if (0 == attr) + return RL_EINVAL; + if (RL_PTHREAD_MUTEX_RECURSIVE == type) + attr->is_recursive_ = true; + return 0; +} + +inline int rl_pthread_mutex_init(rl_pthread_mutex_t* m, rl_pthread_mutexattr_t const* attr, debug_info_param info) +{ + bool is_recursive = attr && attr->is_recursive_; + m->init(false, is_recursive, false, false, info); + return 0; +} + +inline int rl_pthread_mutex_destroy(rl_pthread_mutex_t* m, debug_info_param info) +{ + m->deinit(info); + return 0; +} + +inline int rl_pthread_mutex_lock(rl_pthread_mutex_t* m, debug_info_param info) +{ + m->lock_exclusive(info); + return 0; +} + +inline int rl_pthread_mutex_timedlock(rl_pthread_mutex_t* m, const void* abs_timeout, debug_info_param info) +{ + (void)abs_timeout; + bool rv = m->lock_exclusive_timed(info); + return rv ? 0 : RL_ETIMEDOUT; +} + +inline int rl_pthread_mutex_try_lock(rl_pthread_mutex_t* m, debug_info_param info) +{ + return m->try_lock_exclusive(info) ? 0 : 1; +} + +inline int rl_pthread_mutex_unlock(rl_pthread_mutex_t* m, debug_info_param info) +{ + m->unlock_exclusive(info); + return 0; +} + + + +struct mutex_tag_pthread_rwlock; +typedef generic_mutex<mutex_tag_pthread_rwlock> rl_pthread_rwlock_t; + +inline int rl_pthread_rwlock_init(rl_pthread_rwlock_t* lock, void const* /*attr*/, debug_info_param info) +{ + lock->init(true, false, true, false, info); + return 0; +} + +inline int rl_pthread_rwlock_destroy(rl_pthread_rwlock_t* lock, debug_info_param info) +{ + lock->deinit(info); + return 0; +} + +inline int rl_pthread_rwlock_rdlock(rl_pthread_rwlock_t* lock, debug_info_param info) +{ + lock->lock_shared(info); + return 0; +} + +inline int rl_pthread_rwlock_tryrdlock(rl_pthread_rwlock_t* lock, debug_info_param info) +{ + bool res = lock->try_lock_shared(info); + return res ? 0 : RL_EBUSY; +} + +inline int rl_pthread_rwlock_wrlock(rl_pthread_rwlock_t* lock, debug_info_param info) +{ + lock->lock_exclusive(info); + return 0; +} + +inline int rl_pthread_rwlock_trywrlock(rl_pthread_rwlock_t* lock, debug_info_param info) +{ + bool res = lock->try_lock_exclusive(info); + return res ? 
0 : RL_EBUSY; +} + +inline int rl_pthread_rwlock_unlock(rl_pthread_rwlock_t* lock, debug_info_param info) +{ + lock->unlock_exclusive_or_shared(info); + return 0; +} + + + + +struct condvar_tag_pthread; +typedef condvar<condvar_tag_pthread> rl_pthread_cond_t; +typedef int rl_pthread_condattr_t; + +inline int rl_pthread_cond_init(rl_pthread_cond_t* cv, rl_pthread_condattr_t* /*condattr*/, debug_info_param info) +{ + cv->init(true, info); + return 0; +} + +inline int rl_pthread_cond_destroy(rl_pthread_cond_t* cv, debug_info_param info) +{ + cv->deinit(info); + return 0; +} + +inline int rl_pthread_cond_broadcast(rl_pthread_cond_t* cv, debug_info_param info) +{ + cv->notify_all(info); + return 0; +} + +inline int rl_pthread_cond_signal(rl_pthread_cond_t* cv, debug_info_param info) +{ + cv->notify_one(info); + return 0; +} + +inline int rl_pthread_cond_timedwait(rl_pthread_cond_t* cv, rl_pthread_mutex_t* m, void const* /*timespec*/, debug_info_param info) +{ + sema_wakeup_reason res = cv->wait(*m, true, info); + if (res == sema_wakeup_reason_success) + return 0; + else if (res == sema_wakeup_reason_timeout) + return RL_ETIMEDOUT; + else if (res == sema_wakeup_reason_spurious) + return RL_EINTR; + else + return RL_EINVAL; +} + +inline int rl_pthread_cond_wait(rl_pthread_cond_t* cv, rl_pthread_mutex_t* m, debug_info_param info) +{ + sema_wakeup_reason res = cv->wait(*m, false, info); + if (res == sema_wakeup_reason_success) + return 0; + else if (res == sema_wakeup_reason_spurious) + return RL_EINTR; + else + return RL_EINVAL; +} + + + + +enum RL_FUTEX_OP +{ + RL_FUTEX_WAIT, + RL_FUTEX_WAKE, +}; + +inline int rl_int_futex_impl(context& c, + atomic<int>* uaddr, + int op, + int val, + struct timespec const* timeout, + atomic<int>* uaddr2, + int val3, + debug_info_param info) +{ + if (op == RL_FUTEX_WAIT) + { + c.sched(); + c.atomic_thread_fence_seq_cst(); + int v0; + { + preemption_disabler pd (c); + v0 = uaddr->load(mo_acquire, info); + } + if (v0 != val) + return RL_EWOULDBLOCK; + unpark_reason reason = uaddr->wait(c, timeout != 0, true, info); + if (reason == unpark_reason_normal) + return 0; + else if (reason == unpark_reason_timeout) + return RL_ETIMEDOUT; + else if (reason == unpark_reason_spurious) + return RL_EINTR; + RL_VERIFY(false); + return RL_EINVAL; + } + else if (op == RL_FUTEX_WAKE) + { + if (val <= 0) + return 0; + + c.sched(); + c.atomic_thread_fence_seq_cst(); + return uaddr->wake(c, val, info); + } + else + { + return RL_EINVAL; + } +} + + struct futex_event + { + void* addr_; + int op_; + int val_; + bool timeout_; + int res_; + + void output(std::ostream& s) const + { + s << "<" << std::hex << addr_ << std::dec << "> futex(" + << (op_ == RL_FUTEX_WAIT ? "FUTEX_WAIT" : op_ == RL_FUTEX_WAKE ? "FUTEX_WAKE" : "UNSUPPORTED") << ", " + << val_ << ", " << timeout_ << ") = "; + if (op_ == RL_FUTEX_WAKE) + s << res_; + else + s << (res_ == RL_EWOULDBLOCK ? "EWOULDBLOCK" : res_ == RL_ETIMEDOUT ? "ETIMEDOUT" : res_ == RL_EINTR ? 
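+    // [editor's sketch] hypothetical use of the emulated futex through the
+    // futex() macro defined below (argument order as in rl_futex):
+    //
+    //     rl::atomic<int> f;   // initialized to 0 in the test's before()
+    //     // waiter: blocks only while f still holds the expected value 0,
+    //     // otherwise returns EWOULDBLOCK
+    //     int rc = futex(&f, FUTEX_WAIT, 0, 0, 0, 0);
+    //     // waker:
+    //     f.store(1, rl::mo_release, $);
+    //     futex(&f, FUTEX_WAKE, 1, 0, 0, 0);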
"EINTR" : "UNKNOWN"); + } + }; + +inline int rl_futex(atomic<int>* uaddr, + int op, + int val, + struct timespec const* timeout, + atomic<int>* uaddr2, + int val3, + debug_info_param info) +{ + context& c = ctx(); + int res = rl_int_futex_impl(c, uaddr, op, val, timeout, uaddr2, val3, info); + RL_HIST(futex_event) {uaddr, op, val, timeout != 0, res} RL_HIST_END(); + return res; +} + +} + + + +#ifdef EINVAL +# undef EINVAL +#endif +#define EINVAL rl::RL_EINVAL + +#ifdef ETIMEDOUT +# undef ETIMEDOUT +#endif +#define ETIMEDOUT rl::RL_ETIMEDOUT + +#ifdef EBUSY +# undef EBUSY +#endif +#define EBUSY rl::RL_EBUSY + +#ifdef EINTR +# undef EINTR +#endif +#define EINTR rl::RL_EINTR + +#ifdef EAGAIN +# undef EAGAIN +#endif +#define EAGAIN rl::RL_EAGAIN + +#ifdef EWOULDBLOCK +# undef EWOULDBLOCK +#endif +#define EWOULDBLOCK rl::RL_EWOULDBLOCK + +#define sched_yield() \ + rl::rl_sched_yield($) + +#define pthread_yield() \ + rl::rl_sched_yield($) + + + +#define pthread_t rl::rl_pthread_t +#define pthread_attr_t rl::rl_pthread_attr_t + +#define pthread_create(th, attr, func, arg) \ + rl::rl_pthread_create(th, attr, func, arg, $) + +#define pthread_join(th, res) \ + rl::rl_pthread_join(th, res, $) + + + + +#define sem_t rl::rl_sem_t + +#define sem_init(sema, pshared, initial_count)\ + rl::rl_sem_init(sema, pshared, initial_count, $) + +#define sem_destroy(sema)\ + rl::rl_sem_destroy(sema, $) + +#define sem_wait(sema)\ + rl::rl_sem_wait(sema, $) + +#define sem_trywait(sema)\ + rl::rl_sem_trywait(sema, $) + +#define sem_post(sema)\ +rl::rl_sem_post(sema, $) + +#define sem_getvalue(sema, pvalue)\ + rl::rl_sem_getvalue(sema, pvalue, $) + + + + + +#define pthread_mutex_t rl::rl_pthread_mutex_t +#define pthread_mutexattr_t rl::rl_pthread_mutexattr_t + +#ifdef PTHREAD_MUTEX_NORMAL +# undef PTHREAD_MUTEX_NORMAL +# undef PTHREAD_MUTEX_ERRORCHECK +# undef PTHREAD_MUTEX_RECURSIVE +# undef PTHREAD_MUTEX_DEFAULT +#endif + +#define PTHREAD_MUTEX_NORMAL rl::RL_PTHREAD_MUTEX_NORMAL +#define PTHREAD_MUTEX_ERRORCHECK rl::RL_PTHREAD_MUTEX_ERRORCHECK +#define PTHREAD_MUTEX_RECURSIVE rl::RL_PTHREAD_MUTEX_RECURSIVE +#define PTHREAD_MUTEX_DEFAULT rl::RL_PTHREAD_MUTEX_DEFAULT + +#define pthread_mutexattr_init(attr) \ + rl::rl_pthread_mutexattr_init(attr, $) + +#define pthread_mutexattr_destroy(attr) \ + rl::rl_pthread_mutexattr_destroy(attr, $) + +#define pthread_mutexattr_settype(attr, type) \ + rl::rl_pthread_mutexattr_settype(attr, type, $) + +#define pthread_mutex_init(m, attr) \ + rl::rl_pthread_mutex_init(m, attr, $) + +#define pthread_mutex_destroy(m) \ + rl::rl_pthread_mutex_destroy(m, $) + +#define pthread_mutex_lock(m) \ + rl::rl_pthread_mutex_lock(m, $) + +#define pthread_mutex_timedlock(m, abs_timeout) \ + rl::rl_pthread_mutex_timedlock(m, abs_timeout, $) + +#define pthread_mutex_try_lock(m) \ + rl::rl_pthread_mutex_try_lock(m, $) + +#define pthread_mutex_unlock(m) \ + rl::rl_pthread_mutex_unlock(m, $) + +#define pthread_rwlock_t rl::rl_pthread_rwlock_t + +#define pthread_rwlock_init(lock, attr) \ + rl::rl_pthread_rwlock_init(lock, attr, $) + +#define pthread_rwlock_destroy(lock) \ + rl::rl_pthread_rwlock_destroy(lock, $) + +#define pthread_rwlock_rdlock(lock) \ + rl::rl_pthread_rwlock_rdlock(lock, $) + +#define pthread_rwlock_tryrdlock(lock) \ + rl::rl_pthread_rwlock_tryrdlock(lock, $) + +#define pthread_rwlock_wrlock(lock) \ + rl::rl_pthread_rwlock_wrlock(lock, $) + +#define pthread_rwlock_trywrlock(lock) \ + rl::rl_pthread_rwlock_trywrlock(lock, $) + +#define pthread_rwlock_unlock(lock) \ + 
rl::rl_pthread_rwlock_unlock(lock, $) + + + + +#define pthread_cond_t rl::rl_pthread_cond_t +#define pthread_condattr_t rl::rl_pthread_condattr_t + +#define pthread_cond_init(cv, condattr) \ + rl::rl_pthread_cond_init(cv, condattr, $) + +#define pthread_cond_destroy(cv) \ + rl::rl_pthread_cond_destroy(cv, $) + +#define pthread_cond_broadcast(cv) \ + rl::rl_pthread_cond_broadcast(cv, $) + +#define pthread_cond_signal(cv) \ + rl::rl_pthread_cond_signal(cv, $) + +#define pthread_cond_timedwait(cv, m, timespec) \ + rl::rl_pthread_cond_timedwait(cv, m, timespec, $) + +#define pthread_cond_wait(cv, m) \ + rl::rl_pthread_cond_wait(cv, m, $) + + + +#ifdef FUTEX_WAKE +# undef FUTEX_WAKE +#endif +#define FUTEX_WAKE rl::RL_FUTEX_WAKE + +#ifdef FUTEX_WAIT +# undef FUTEX_WAIT +#endif +#define FUTEX_WAIT rl::RL_FUTEX_WAIT + +#define futex(uaddr, op, val, timeout, uaddr2, val3) \ + rl::rl_futex(uaddr, op, val, timeout, uaddr2, val3, $) + +#endif + + diff --git a/libs/relacy/relacy/stdlib/semaphore.hpp b/libs/relacy/relacy/stdlib/semaphore.hpp @@ -0,0 +1,558 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. + */ + +#ifndef RL_SEMAPHORE_HPP +#define RL_SEMAPHORE_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "../base.hpp" +#include "../context_base.hpp" +#include "../sync_var.hpp" +#include "../waitset.hpp" +#include "../signature.hpp" + + +namespace rl +{ + +enum sema_wakeup_reason +{ + sema_wakeup_reason_success, + sema_wakeup_reason_failed, + sema_wakeup_reason_timeout, + sema_wakeup_reason_spurious, +}; + +struct win_object +{ + virtual void deinit(debug_info_param info) = 0; + virtual ~win_object() {} +}; + +struct win_waitable_object : win_object +{ + virtual sema_wakeup_reason wait(bool try_wait, bool is_timed, debug_info_param info) = 0; + virtual bool signal(debug_info_param info) = 0; + + virtual bool is_signaled(debug_info_param info) = 0; + virtual void memory_acquire(debug_info_param info) = 0; + virtual void* prepare_wait(debug_info_param info) = 0; +}; + + + + +struct sema_data +{ + virtual sema_wakeup_reason wait(bool try_wait, bool is_timed, debug_info_param info) = 0; + virtual bool post(unsigned count, unsigned& prev_count, debug_info_param info) = 0; + virtual int get_value(debug_info_param info) = 0; + virtual bool is_signaled(debug_info_param info) = 0; + virtual void memory_acquire(debug_info_param info) = 0; + virtual void* prepare_wait(debug_info_param info) = 0; + virtual ~sema_data() {} // just to calm down gcc +}; + + + + +template<thread_id_t thread_count> +class sema_data_impl : public sema_data +{ +public: + sema_data_impl(bool spurious_wakeups, unsigned initial_count, unsigned max_count) + : spurious_wakeups_(spurious_wakeups) + , count_(initial_count) + , max_count_(max_count) + { + RL_VERIFY(max_count <= INT_MAX); + } + + ~sema_data_impl() + { + //!!! 
detect destruction with waiters + } + + struct wait_event + { + sema_data_impl* addr_; + bool try_wait_; + bool is_timed_; + unsigned count_; + sema_wakeup_reason reason_; + + void output(std::ostream& s) const + { + s << "<" << std::hex << addr_ << std::dec << "> semaphore: "; + if (try_wait_) + s << "try_wait "; + else if (is_timed_) + s << "timed wait "; + else + s << "wait "; + + if (reason_ == sema_wakeup_reason_success) + s << "succeeded "; + else if (reason_ == sema_wakeup_reason_failed) + s << "failed "; + else if (reason_ == sema_wakeup_reason_timeout) + s << "timed out "; + else if (reason_ == sema_wakeup_reason_spurious) + s << "spuriously failed "; + + s << "new_count=" << count_; + } + }; + + struct post_event + { + sema_data_impl* addr_; + unsigned value_; + unsigned count_; + bool result_; + thread_id_t unblocked_; + + void output(std::ostream& s) const + { + s << "<" << std::hex << addr_ << std::dec << "> semaphore: "; + if (result_) + s << "post "; + else + s << "post FAILED "; + + s << "value=" << value_; + s << " new_count=" << count_; + s << " unblocked=" << unblocked_; + } + }; + + struct get_value_event + { + sema_data_impl* addr_; + unsigned count_; + + void output(std::ostream& s) const + { + s << "<" << std::hex << addr_ << std::dec << "> semaphore: "; + s << "get_value count=" << count_; + } + }; + + virtual sema_wakeup_reason wait(bool try_wait, + bool is_timed, + debug_info_param info) + { + context& c = ctx(); + c.sched(); + sign_.check(info); + + sema_wakeup_reason reason = sema_wakeup_reason_success; + for (;;) + { + if (count_) + { + count_ -= 1; + sync_.acq_rel(c.threadx_); + reason = sema_wakeup_reason_success; + break; + } + + if (try_wait) + { + sync_.acquire(c.threadx_); + reason = sema_wakeup_reason_failed; + break; + } + + unpark_reason wr = ws_.park_current(c, is_timed, spurious_wakeups_, true, info); + if (unpark_reason_timeout == wr) + { + RL_VERIFY(is_timed); + sync_.acquire(c.threadx_); + reason = sema_wakeup_reason_timeout; + break; + } + else if (unpark_reason_spurious == wr) + { + RL_VERIFY(spurious_wakeups_); + sync_.acquire(c.threadx_); + reason = sema_wakeup_reason_spurious; + break; + } + else if (unpark_reason_normal == wr) + { + RL_VERIFY(count_ > 0); + count_ -= 1; + sync_.acq_rel(c.threadx_); + c.switch_back(info); + reason = sema_wakeup_reason_success; + break; + } + RL_VERIFY(false); + } + + RL_HIST(wait_event) {this, try_wait, is_timed, count_, reason} RL_HIST_END(); + return reason; + } + + virtual bool post(unsigned count, unsigned& prev_count, debug_info_param info) + { + context& c = ctx(); + c.sched(); + sign_.check(info); + + bool result = false; + prev_count = count_; + thread_id_t unblocked = 0; + if (false == (count >= INT_MAX || count + count_ > max_count_)) + { + result = true; + count_ += count; + sync_.acq_rel(c.threadx_); + for (unsigned i = 0; i != count; ++i) + { + if (false == ws_.unpark_one(c, info)) + break; + unblocked += 1; + } + } + else + { + sync_.acquire(c.threadx_); + } + RL_HIST(post_event) {this, count, count_, result, unblocked} RL_HIST_END(); + return result; + } + + virtual int get_value(debug_info_param info) + { + context& c = ctx(); + c.sched(); + sign_.check(info); + + RL_VERIFY(count_ <= INT_MAX); + int result = (int)count_ - ws_.size(); + sync_.acquire(c.threadx_); + + RL_HIST(get_value_event) {this, (unsigned)result} RL_HIST_END(); + return result; + } + +private: + signature<0xaabb6634> sign_; + bool const spurious_wakeups_; + unsigned count_; + unsigned const max_count_; + 
waitset<thread_count> ws_; + sync_var<thread_count> sync_; + + virtual bool is_signaled(debug_info_param info) + { + (void)info; + return count_ > 0; + } + + virtual void memory_acquire(debug_info_param info) + { + (void)info; + sync_.acquire(ctx().threadx_); + } + + virtual void* prepare_wait(debug_info_param info) + { + (void)info; + return &ws_; + } + + RL_NOCOPY(sema_data_impl); +}; + + + +template<typename tag_t> +class semaphore : public win_waitable_object +{ +public: + semaphore() + : impl_() + { + } + + semaphore(semaphore const&) + : impl_() + { + } + + semaphore& operator = (semaphore const&) + { + return *this; + } + + void init(bool spurious_wakeups, unsigned initial_count, unsigned max_count, debug_info_param info) + { + context& c = ctx(); + RL_ASSERT_IMPL(0 == impl_, test_result_double_initialization_of_semaphore, "", info); + sign_.check(info); + impl_ = c.sema_ctor(spurious_wakeups, initial_count, max_count); + } + + void deinit(debug_info_param info) + { + context& c = ctx(); + check(info); + c.sema_dtor(impl_); + impl_ = 0; + } + + virtual sema_wakeup_reason wait(bool try_wait, bool is_timed, debug_info_param info) + { + check(info); + return impl_->wait(try_wait, is_timed, info); + } + + virtual bool signal(debug_info_param info) + { + unsigned prev_count = 0; + return post(1, prev_count, info); + } + + bool post(unsigned count, unsigned& prev_count, debug_info_param info) + { + check(info); + return impl_->post(count, prev_count, info); + } + + int get_value(debug_info_param info) + { + check(info); + return impl_->get_value(info); + } + +private: + sema_data* impl_; + signature<0x228855dd> sign_; + + sema_data* check(debug_info_param info) + { + RL_ASSERT_IMPL(impl_, test_result_usage_of_non_initialized_semaphore, "", info); + sign_.check(info); + return impl_; + } + + virtual bool is_signaled(debug_info_param info) + { + return check(info)->is_signaled(info); + } + + virtual void memory_acquire(debug_info_param info) + { + check(info)->memory_acquire(info); + } + + virtual void* prepare_wait(debug_info_param info) + { + return check(info)->prepare_wait(info); + } +}; + + + +struct wfmo_event +{ + unsigned long count_; + bool wait_all_; + bool try_wait_; + bool is_timed_; + sema_wakeup_reason result_; + size_t signaled_; + + void output(std::ostream& s) const + { + s << "WFMO: " + << "count=" << count_ + << ", wait_all=" << wait_all_ + << ", try_wait=" << try_wait_ + << ", is_timed=" << is_timed_ + << ", result="; + if (sema_wakeup_reason_success == result_) + { + s << "success"; + if (wait_all_ == false) + s << ", object=" << signaled_; + } + else + { + s << "timeout"; + } + } +}; + +size_t const wfmo_max_objects = 32; + +inline sema_wakeup_reason wait_for_multiple_objects( + size_t& signaled, + size_t count, + win_waitable_object** wo, + bool wait_all, + bool try_wait, + bool is_timed, + debug_info_param info) +{ + context& c = ctx(); + c.sched(); + + RL_VERIFY(count <= wfmo_max_objects); + void* ws [wfmo_max_objects]; + + sema_wakeup_reason result = sema_wakeup_reason_failed; + signaled = 0; + + if (wait_all) + { + for (;;) + { + unsigned long i = 0; + for (i = 0; i != count; ++i) + { + if (false == wo[i]->is_signaled(info)) + break; + } + if (i == count) + { + preemption_disabler pd (c); + for (i = 0; i != count; ++i) + { + sema_wakeup_reason r = wo[i]->wait(true, false, info); + RL_VERIFY(r == sema_wakeup_reason_success); + (void)r; + } + result = sema_wakeup_reason_success; + break; + } + else if (try_wait) + { + for (i = 0; i != count; ++i) + 
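+                    // even a failed try-wait acquires each object's memory
+                    // state, so the caller still synchronizes with whatever
+                    // past signals released before timeout is reported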
wo[i]->memory_acquire(info); + result = sema_wakeup_reason_timeout; + break; + } + else + { + for (i = 0; i != count; ++i) + { + ws[i] = wo[i]->prepare_wait(info); + } + unpark_reason reason = c.wfmo_park(ws, wo, (unsigned)count, !!wait_all, is_timed, info); + RL_VERIFY(unpark_reason_spurious != reason); + if (unpark_reason_timeout == reason) + { + for (i = 0; i != count; ++i) + wo[i]->memory_acquire(info); + result = sema_wakeup_reason_timeout; + break; + } + else if (unpark_reason_normal == reason) + { + { + preemption_disabler pd (c); + for (unsigned long i = 0; i != count; ++i) + { + RL_VERIFY(wo[i]->is_signaled(info)); + sema_wakeup_reason r = wo[i]->wait(true, false, info); + RL_VERIFY(r == sema_wakeup_reason_success); + (void)r; + } + } + c.switch_back(info); + result = sema_wakeup_reason_success; + break; + } + RL_VERIFY(false); + } + } + } + else + { + for (;;) + { + unsigned long i = 0; + for (i = 0; i != count; ++i) + { + if (true == wo[i]->is_signaled(info)) + break; + } + if (i != count) + { + preemption_disabler pd (c); + sema_wakeup_reason r = wo[i]->wait(true, false, info); + RL_VERIFY(r == sema_wakeup_reason_success); + (void)r; + signaled = i; + result = sema_wakeup_reason_success; + break; + } + else if (try_wait) + { + for (i = 0; i != count; ++i) + wo[i]->memory_acquire(info); + result = sema_wakeup_reason_timeout; + break; + } + else + { + for (i = 0; i != count; ++i) + { + ws[i] = wo[i]->prepare_wait(info); + } + unpark_reason reason = c.wfmo_park(ws, wo, (unsigned)count, !!wait_all, is_timed, info); + RL_VERIFY(unpark_reason_spurious != reason); + if (unpark_reason_timeout == reason) + { + for (i = 0; i != count; ++i) + wo[i]->memory_acquire(info); + result = sema_wakeup_reason_timeout; + break; + } + else if (unpark_reason_normal == reason) + { + unsigned long i = 0; + for (i = 0; i != count; ++i) + { + if (true == wo[i]->is_signaled(info)) + break; + } + RL_VERIFY(i != count); + { + preemption_disabler pd (c); + sema_wakeup_reason r = wo[i]->wait(true, false, info); + RL_VERIFY(r == sema_wakeup_reason_success); + (void)r; + } + c.switch_back(info); + signaled = i; + result = sema_wakeup_reason_success; + break; + } + RL_VERIFY(false); + } + } + } + + RL_HIST(wfmo_event) {(unsigned)count, wait_all, try_wait, is_timed, result, signaled} RL_HIST_END(); + return result; +} + + +} + + +#endif + diff --git a/libs/relacy/relacy/stdlib/windows.hpp b/libs/relacy/relacy/stdlib/windows.hpp @@ -0,0 +1,617 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. 
+ */ + +#ifndef RL_WINDOWS_HPP +#define RL_WINDOWS_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "mutex.hpp" +#include "condition_variable.hpp" +#include "semaphore.hpp" +#include "event.hpp" + + +namespace rl +{ + +typedef win_object* rl_HANDLE; +unsigned long const rl_INFINITE = (unsigned long)-1; + +unsigned long const rl_WAIT_FAILED = (unsigned long)-1; +unsigned long const rl_WAIT_OBJECT_0 = 100; +unsigned long const rl_WAIT_TIMEOUT = 1; +unsigned long const rl_WAIT_IO_COMPLETION = 2; +unsigned long const rl_MAXIMUM_WAIT_OBJECTS = wfmo_max_objects; + + +inline int rl_SwitchToThread(debug_info_param info) +{ + yield(1, info); + return 1; +} + +inline void rl_Sleep(unsigned long milliseconds, debug_info_param info) +{ + yield(milliseconds ? milliseconds : 1, info); +} + + + +inline unsigned long rl_WaitForSingleObjectEx(rl_HANDLE obj, unsigned long timeout, int alertable, debug_info_param info) +{ + (void)alertable; //!!! not yet supported – support it! + //!!! support WAIT_IO_COMPLETION + RL_VERIFY(false == alertable && "Alertable wait is not supported in WaitForSingleObject() yet"); + + bool try_wait = (timeout == 0); + bool is_timed = (timeout != rl_INFINITE); + sema_wakeup_reason reason = static_cast<win_waitable_object*>(obj)->wait(try_wait, is_timed, info); + if (reason == sema_wakeup_reason_success) + return rl_WAIT_OBJECT_0; + else if (reason == sema_wakeup_reason_timeout) + return rl_WAIT_TIMEOUT; + else if (reason == sema_wakeup_reason_failed) + return rl_WAIT_TIMEOUT; + RL_VERIFY(false); + return rl_WAIT_FAILED; +} + +inline unsigned long rl_WaitForSingleObject(rl_HANDLE obj, unsigned long timeout, debug_info_param info) +{ + return rl_WaitForSingleObjectEx(obj, timeout, 0, info); +} + +inline unsigned long rl_WaitForMultipleObjectsEx(unsigned long count, rl_HANDLE* objects, int wait_all, unsigned long timeout, int alertable, debug_info_param info) +{ + (void)alertable; //!!! + //!!! support WAIT_IO_COMPLETION + RL_VERIFY(false == alertable && "Alertable wait is not supported in WaitForMultipleObjects() yet"); + + bool try_wait = (timeout == 0); + bool is_timed = (timeout != rl_INFINITE); + win_waitable_object** obj = reinterpret_cast<win_waitable_object**>(objects); + size_t signaled = 0; + sema_wakeup_reason reason = wait_for_multiple_objects(signaled, count, obj, !!wait_all, try_wait, is_timed, info); + if (reason == sema_wakeup_reason_success) + return rl_WAIT_OBJECT_0 + (int)signaled; + else if (reason == sema_wakeup_reason_timeout) + return rl_WAIT_TIMEOUT; + RL_VERIFY(false); + return rl_WAIT_FAILED; +} + +inline unsigned long rl_WaitForMultipleObjects(unsigned long count, rl_HANDLE* objects, int wait_all, unsigned long timeout, debug_info_param info) +{ + return rl_WaitForMultipleObjectsEx(count, objects, wait_all, timeout, 0, info); +} + +inline unsigned long rl_SignalObjectAndWait(rl_HANDLE obj_to_signal, + rl_HANDLE obj_to_wait, + unsigned long timeout, + int alertable, + debug_info_param info) +{ + bool result = static_cast<win_waitable_object*>(obj_to_signal)->signal(info); + if (false == result) + return result ? 
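+        // result is false on this branch, so this returns 0 (failure);
+        // on success, preemption is disabled below so the signal and the
+        // wait run back-to-back, modeling SignalObjectAndWait's atomicity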
1 : 0; + preemption_disabler pd (ctx()); + return rl_WaitForSingleObjectEx(obj_to_wait, timeout, alertable, info); +} + + + +struct sem_tag_win; + +inline rl_HANDLE rl_CreateSemaphore(void* /*security*/, long initial_count, long max_count, void const* /*name*/, debug_info_param info) +{ + void* mem = ctx().alloc(sizeof(semaphore<sem_tag_win>), false, info); + semaphore<sem_tag_win>* sema = new (mem) semaphore<sem_tag_win>; + sema->init(false, initial_count, max_count, info); + return sema; +} + +inline int rl_CloseHandle(rl_HANDLE h, debug_info_param info) +{ + h->deinit(info); + h->~win_object(); + (ctx().free)(h, false, info); //!!! rename free because of the define + return 1; +} + +inline int rl_ReleaseSemaphore(rl_HANDLE sema, long count, long* prev_count, debug_info_param info) +{ + unsigned prev = 0; + bool result = static_cast<semaphore<sem_tag_win>*>(sema)->post(count, prev, info); + if (prev_count) + prev_count[0] = prev; + return result ? 1 : 0; +} + + + + +inline rl_HANDLE rl_CreateEvent(void* /*security*/, int manual_reset, int initial_state, void const* /*name*/, debug_info_param info) +{ + void* mem = ctx().alloc(sizeof(generic_event), false, info); + generic_event* ev = new (mem) generic_event; + ev->init(!!manual_reset, !!initial_state, info); + return ev; +} + +inline int rl_SetEvent(rl_HANDLE ev, debug_info_param info) +{ + static_cast<generic_event*>(ev)->set(info); + return 1; +} + +inline int rl_ResetEvent(rl_HANDLE ev, debug_info_param info) +{ + static_cast<generic_event*>(ev)->reset(info); + return 1; +} + +inline int rl_PulseEvent(rl_HANDLE ev, debug_info_param info) +{ + static_cast<generic_event*>(ev)->pulse(info); + return 1; +} + + + +struct mutex_tag_win_cs; +typedef generic_mutex<mutex_tag_win_cs> rl_CRITICAL_SECTION; + +inline void rl_InitializeCriticalSection(rl_CRITICAL_SECTION* m, debug_info_param info) +{ + m->init(false, true, false, false, info); +} + +inline int rl_InitializeCriticalSectionAndSpinCount(rl_CRITICAL_SECTION* m, unsigned long spin_count, debug_info_param info) +{ + (void)spin_count; + m->init(false, true, false, false, info); + return 1; +} + +inline int rl_InitializeCriticalSectionEx(rl_CRITICAL_SECTION* m, unsigned long spin_count, unsigned long flags, debug_info_param info) +{ + (void)spin_count; + (void)flags; + m->init(false, true, false, false, info); + return 1; +} + +inline void rl_DeleteCriticalSection(rl_CRITICAL_SECTION* m, debug_info_param info) +{ + m->deinit(info); +} + +inline void rl_EnterCriticalSection(rl_CRITICAL_SECTION* m, debug_info_param info) +{ + m->lock_exclusive(info); +} + +inline int rl_TryEnterCriticalSection(rl_CRITICAL_SECTION* m, debug_info_param info) +{ + return m->try_lock_exclusive(info) ? 1 : 0; +} + +inline void rl_LeaveCriticalSection(rl_CRITICAL_SECTION* m, debug_info_param info) +{ + m->unlock_exclusive(info); +} + +struct mutex_tag_win_srwl; +typedef generic_mutex<mutex_tag_win_srwl> rl_SRWLOCK; + +inline void rl_InitializeSRWLock(rl_SRWLOCK* lock, debug_info_param info) +{ + lock->init(true, false, false, false, info); +} + +inline void rl_AcquireSRWLockExclusive(rl_SRWLOCK* lock, debug_info_param info) +{ + lock->lock_exclusive(info); +} + +inline void rl_AcquireSRWLockShared(rl_SRWLOCK* lock, debug_info_param info) +{ + lock->lock_shared(info); +} + +inline void rl_ReleaseSRWLockExclusive(rl_SRWLOCK* lock, debug_info_param info) +{ + lock->unlock_exclusive(info); +} + +inline void rl_ReleaseSRWLockShared(rl_SRWLOCK* lock, debug_info_param info) +{ + lock->unlock_shared(info); +} + +//!!! 
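+// (the real Win32 API has no DeleteSRWLock; relacy presumably needs it so a
+// test can reclaim the simulated lock state and pass resource-leak checking)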
+inline void rl_DeleteSRWLock(rl_SRWLOCK* lock, debug_info_param info) +{ + lock->deinit(info); +} + + +struct mutex_tag_win_mutex; +typedef generic_mutex<mutex_tag_win_mutex> rl_win_mutex; + + +inline rl_HANDLE rl_CreateMutex(void* /*security*/, int initial_owner, void const* /*name*/, debug_info_param info) +{ + void* mem = ctx().alloc(sizeof(rl_win_mutex), false, info); + rl_win_mutex* mtx = new (mem) rl_win_mutex (); + mtx->init(false, true, false, false, info); + if (initial_owner) + mtx->lock_exclusive(info); + return mtx; +} + +inline int rl_ReleaseMutex(rl_HANDLE mtx, debug_info_param info) +{ + static_cast<rl_win_mutex*>(mtx)->unlock_exclusive(info); + return 1; + +} + + + +struct condvar_tag_win; +typedef condvar<condvar_tag_win> rl_CONDITION_VARIABLE; +unsigned long const rl_CONDITION_VARIABLE_LOCKMODE_SHARED = 1; + +inline void rl_InitializeConditionVariable(rl_CONDITION_VARIABLE* cv, debug_info_param info) +{ + cv->init(false, info); +} + +inline int rl_SleepConditionVariableCS(rl_CONDITION_VARIABLE* cv, rl_CRITICAL_SECTION* cs, unsigned long ms, debug_info_param info) +{ + cv->wait(*cs, ms != rl_INFINITE, info); + return 0; +} + +inline int rl_SleepConditionVariableSRW(rl_CONDITION_VARIABLE* cv, rl_SRWLOCK* lock, unsigned long ms, unsigned long flags, debug_info_param info) +{ + //!!! CONDITION_VARIABLE_LOCKMODE_SHARED + (void)flags; + cv->wait(*lock, ms != rl_INFINITE, info); + return 0; +} + +inline void rl_WakeAllConditionVariable(rl_CONDITION_VARIABLE* cv, debug_info_param info) +{ + cv->notify_all(info); +} + +inline void rl_WakeConditionVariable(rl_CONDITION_VARIABLE* cv, debug_info_param info) +{ + cv->notify_one(info); +} + +inline void rl_DeleteConditionVariable(rl_CONDITION_VARIABLE* cv, debug_info_param info) +{ + cv->deinit(info); +} + + + + + + +typedef unsigned long (RL_STDCALL *rl_WIN_START_ROUTINE)(void* param); +typedef unsigned (RL_STDCALL *rl_MSVCR_THREAD_ROUTINE)(void* param); + +template<typename thread_fn_t> +struct win32_thread_helper +{ + thread_fn_t fn; + void* param; + + static void* thread(void* p) + { + win32_thread_helper* self = (win32_thread_helper*)p; + void* result = (void*)(uintptr_t)(self->fn(self->param)); + delete_impl(self, $); + return result; + } +}; + +inline rl_HANDLE rl_CreateThread(void* security, unsigned stack_size, rl_WIN_START_ROUTINE fn, void* param, unsigned long creation_flags, unsigned long* thread_id, debug_info_param info) +{ + (void)security; + (void)stack_size; + (void)creation_flags; + (void)thread_id; + + void* mem = + ctx().alloc(sizeof(win32_thread_helper<rl_WIN_START_ROUTINE>), false, info); + win32_thread_helper<rl_WIN_START_ROUTINE>* arg = + new (mem) win32_thread_helper<rl_WIN_START_ROUTINE>; + arg->fn = fn; + arg->param = param; + win_waitable_object* handle = ctx().create_thread(&win32_thread_helper<rl_WIN_START_ROUTINE>::thread, arg); + return handle; +} + + +inline uintptr_t rl_beginthreadex(void *security, unsigned stack_size, rl_MSVCR_THREAD_ROUTINE start_address, void *arglist, unsigned initflag, unsigned* thrdaddr, debug_info_param info) +{ + (void)security; + (void)stack_size; + (void)initflag; + (void)thrdaddr; + + void* mem = ctx().alloc(sizeof(win32_thread_helper<rl_MSVCR_THREAD_ROUTINE>), false, info); + win32_thread_helper<rl_MSVCR_THREAD_ROUTINE>* arg = + new (mem) win32_thread_helper<rl_MSVCR_THREAD_ROUTINE>; + arg->fn = start_address; + arg->param = arglist; + win_waitable_object* handle = ctx().create_thread(&win32_thread_helper<rl_MSVCR_THREAD_ROUTINE>::thread, arg); + return (uintptr_t)handle; 
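+    // the returned handle is the simulated thread's win_waitable_object, so
+    // WaitForSingleObject() above can join it like a real thread handle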
+} + +inline unsigned long rl_SetThreadAffinityMask(rl_HANDLE th, unsigned long affinity_mask, debug_info_param info) +{ + (void)(th); + (void)(affinity_mask); + (void)info; + return 0; +} + +inline int rl_SuspendThread(rl_HANDLE th, debug_info_param info) +{ + (void)th; + (void)info; + return 1; +} + +inline int rl_ResumeThread(rl_HANDLE th, debug_info_param info) +{ + (void)th; + (void)info; + return 1; +} + +inline unsigned long GetLastError() +{ + return (unsigned long)get_errno(); +} + +inline void SetLastError(unsigned long value) +{ + set_errno((int)value); +} + +inline void rl_FlushProcessWriteBuffers(debug_info_param info) +{ + systemwide_fence(info); +} + +} + + +#ifdef HANDLE +# undef HANDLE +#endif +#define HANDLE rl::rl_HANDLE + +#ifdef INFINITE +# undef INFINITE +#endif +#define INFINITE rl::rl_INFINITE + + +#ifdef WAIT_FAILED +# undef WAIT_FAILED +#endif +#define WAIT_FAILED rl::rl_WAIT_FAILED + +#ifdef WAIT_OBJECT_0 +# undef WAIT_OBJECT_0 +#endif +#define WAIT_OBJECT_0 rl::rl_WAIT_OBJECT_0 + +#ifdef WAIT_TIMEOUT +# undef WAIT_TIMEOUT +#endif +#define WAIT_TIMEOUT rl::rl_WAIT_TIMEOUT + +#ifdef WAIT_IO_COMPLETION +# undef WAIT_IO_COMPLETION +#endif +#define WAIT_IO_COMPLETION rl::rl_WAIT_IO_COMPLETION + +#ifdef MAXIMUM_WAIT_OBJECTS +# undef MAXIMUM_WAIT_OBJECTS +#endif +#define MAXIMUM_WAIT_OBJECTS rl::rl_MAXIMUM_WAIT_OBJECTS + + + +#define SwitchToThread() \ + rl::rl_SwitchToThread($) + +#define Sleep(milliseconds) \ + rl::rl_Sleep(milliseconds, $) + + + +#define CloseHandle(obj) \ + rl::rl_CloseHandle(obj, $) + +#define WaitForSingleObject(obj, timeout) \ + rl::rl_WaitForSingleObject(obj, timeout, $) + +#define WaitForMultipleObjects(count, objects, wait_all, timeout) \ + rl::rl_WaitForMultipleObjects(count, objects, wait_all, timeout, $) + +#define WaitForMultipleObjectsEx(count, objects, wait_all, timeout, alertable)] \ + rl::rl_WaitForMultipleObjectsEx(count, objects, wait_all, timeout, alertable, $) + +#define SignalObjectAndWait(obj_to_signal, obj_to_wait, timeout, alertable) \ + rl::rl_SignalObjectAndWait(obj_to_signal, obj_to_wait, timeout, alertable, $) + +#ifdef CreateSemaphore +# undef CreateSemaphore +#endif + +#ifdef CreateSemaphore +# undef ReleaseSemaphore +#endif + +#define CreateSemaphoreA rl_CreateSemaphore +#define CreateSemaphoreW rl_CreateSemaphore +#define CreateSemaphore rl_CreateSemaphore +#define rl_CreateSemaphore(security, initial_count, max_count, name) \ + rl::rl_CreateSemaphore(security, initial_count, max_count, name, $)\ + +#define ReleaseSemaphore(sema, count, prev_count) \ + rl::rl_ReleaseSemaphore(sema, count, prev_count, $) + + + +#ifdef CreateEvent +# undef CreateEvent +#endif +#define CreateEventA rl_CreateEvent +#define CreateEventW rl_CreateEvent +#define CreateEvent rl_CreateEvent +#define rl_CreateEvent(security, manual_reset, initial_state, name)\ + rl::rl_CreateEvent(security, manual_reset, initial_state, name, $) + +#define SetEvent(ev)\ + rl::rl_SetEvent(ev, $) + +#define ResetEvent(ev)\ + rl::rl_ResetEvent(ev, $) + +#define PulseEvent(ev)\ + rl::rl_PulseEvent(ev, $) + + +#ifdef CreateMutex +# undef CreateMutex +#endif +#define CreateMutexA rl_CreateMutex +#define CreateMutexW rl_CreateMutex +#define CreateMutex rl_CreateMutex +#define rl_CreateMutex(security, initial_owner, name)\ + rl::rl_CreateMutex(security, initial_owner, name, $) + +#define ReleaseMutex(mtx)\ + rl::rl_ReleaseMutex(mtx, $) + + + +#define CRITICAL_SECTION rl::rl_CRITICAL_SECTION + +#define InitializeCriticalSection(cs) \ + rl::rl_InitializeCriticalSection(cs, 
$) + +#define InitializeCriticalSectionAndSpinCount(cs, spin) \ + rl::rl_InitializeCriticalSectionAndSpinCount(cs, spin, $) + +#define InitializeCriticalSectionEx(cs, spin, flags) \ + rl::rl_InitializeCriticalSectionEx(cs, spin, flags, $) + +#define DeleteCriticalSection(cs) \ + rl::rl_DeleteCriticalSection(cs, $) + +#define EnterCriticalSection(cs) \ + rl::rl_EnterCriticalSection(cs, $) + +#define TryEnterCriticalSection(cs) \ + rl::rl_TryEnterCriticalSection(cs, $) + +#define LeaveCriticalSection(cs) \ + rl::rl_LeaveCriticalSection(cs, $) + + + + +#define SRWLOCK rl::rl_SRWLOCK + +#define InitializeSRWLock(lock) \ + rl::rl_InitializeSRWLock(lock, $) + +#define AcquireSRWLockExclusive(lock) \ + rl::rl_AcquireSRWLockExclusive(lock, $) + +#define AcquireSRWLockShared(lock) \ + rl::rl_AcquireSRWLockShared(lock, $) + +#define ReleaseSRWLockExclusive(lock) \ + rl::rl_ReleaseSRWLockExclusive(lock, $) + +#define ReleaseSRWLockShared(lock) \ + rl::rl_ReleaseSRWLockShared(lock, $) + +//!!! no such function in WIN API +#define DeleteSRWLock(lock) \ + rl::rl_DeleteSRWLock(lock, $) + + + + + + +#define CONDITION_VARIABLE rl::rl_CONDITION_VARIABLE + +#ifdef CONDITION_VARIABLE_LOCKMODE_SHARED +# undef CONDITION_VARIABLE_LOCKMODE_SHARED +#endif +#define CONDITION_VARIABLE_LOCKMODE_SHARED rl::rl_CONDITION_VARIABLE_LOCKMODE_SHARED + +#define InitializeConditionVariable(cv) \ + rl::rl_InitializeConditionVariable(cv, $) + +#define SleepConditionVariableCS(cv, cs, ms) \ + rl::rl_SleepConditionVariableCS(cv, cs, ms, $) + +#define SleepConditionVariableSRW(cv, lock, ms, flags) \ + rl::rl_SleepConditionVariableSRW(cv, lock, ms, flags, $) + +#define WakeAllConditionVariable(cv) \ + rl::rl_WakeAllConditionVariable(cv, $) + +#define WakeConditionVariable(cv) \ + rl::rl_WakeConditionVariable(cv, $) + +//!!! no such function in WIN API +#define DeleteConditionVariable(cv) \ + rl::rl_DeleteConditionVariable(cv, $) + + + +#define CreateThread(security, stack_size, fn, param, creation_flags, thread_id) \ + rl::rl_CreateThread(security, stack_size, fn, param, creation_flags, thread_id, $) + +#define _beginthreadex(security, stack_size, start_address, arglist, initflag, thrdaddr) \ + rl::rl_beginthreadex(security, stack_size, start_address, arglist, initflag, thrdaddr, $) + +#define SetThreadAffinityMask(th, affinity_mask) \ + rl::rl_SetThreadAffinityMask(th, affinity_mask, $) + +#define SuspendThread(th) \ + rl::rl_SuspendThread(th, $) + +#define ResumeThread(th) \ + rl::rl_ResumeThread(th, $) + +#define FlushProcessWriteBuffers() \ + rl::rl_FlushProcessWriteBuffers($) + + +#endif diff --git a/libs/relacy/relacy/sync_var.hpp b/libs/relacy/relacy/sync_var.hpp @@ -0,0 +1,66 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. 
+ */ + +#ifndef RL_SYNC_VAR_HPP +#define RL_SYNC_VAR_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "base.hpp" +#include "foreach.hpp" + + +namespace rl +{ + + +template<thread_id_t thread_count> +class sync_var : nocopy<> +{ +public: + sync_var() + { + iteration_begin(); + } + + void iteration_begin() + { + foreach<thread_count>(order_, &assign_zero); + } + + void acquire(thread_info_base* th) + { + th->own_acq_rel_order_ += 1; + foreach<thread_count>(th->acq_rel_order_, order_, &assign_max); + } + + void release(thread_info_base* th) + { + th->own_acq_rel_order_ += 1; + foreach<thread_count>(order_, th->acq_rel_order_, &assign_max); + } + + void acq_rel(thread_info_base* th) + { + th->own_acq_rel_order_ += 1; + timestamp_t* acq_rel_order = th->acq_rel_order_; + timestamp_t* order = order_; + foreach<thread_count>(acq_rel_order, order, &assign_max); + foreach<thread_count>(order, acq_rel_order, &assign_max); + } + +private: + timestamp_t order_ [thread_count]; +}; + + +} + +#endif diff --git a/libs/relacy/relacy/test_params.hpp b/libs/relacy/relacy/test_params.hpp @@ -0,0 +1,90 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. + * This software is distributed under a license and may not be copied, + * modified or distributed except as expressly authorized under the + * terms of the license contained in the file LICENSE.TXT in this distribution. + */ + +#ifndef RL_TEST_PARAMS_HPP +#define RL_TEST_PARAMS_HPP +#ifdef _MSC_VER +# pragma once +#endif + +#include "base.hpp" +#include "test_result.hpp" + + +namespace rl +{ + +enum scheduler_type_e +{ + sched_random, + sched_bound, + sched_full, + sched_count, + + random_scheduler_type = sched_random, + fair_context_bound_scheduler_type = sched_bound, + fair_full_search_scheduler_type = sched_full, + scheduler_type_count +}; + +inline char const* format(scheduler_type_e t) +{ + switch (t) + { + case sched_random: return "random scheduler"; + case sched_bound: return "context bound scheduler"; + case sched_full: return "full search scheduler"; + default: break; + } + RL_VERIFY(false); + throw std::logic_error("invalid scheduler type"); +} + + +struct test_params +{ + // input params + iteration_t iteration_count; + std::ostream* output_stream; + std::ostream* progress_stream; + unsigned progress_output_period; + bool collect_history; + bool output_history; + scheduler_type_e search_type; + unsigned context_bound; + unsigned execution_depth_limit; + string initial_state; + + // output params + test_result_e test_result; + iteration_t stop_iteration; + string test_name; + string final_state; + + test_params() + { + iteration_count = 1000; + output_stream = &std::cout; + progress_stream = &std::cout; + progress_output_period = 3; + collect_history = false; + output_history = false; + search_type = random_scheduler_type; + context_bound = 1; + execution_depth_limit = 2000; + + test_result = test_result_success; + stop_iteration = 0; + } +}; + + +} + +#endif diff --git a/libs/relacy/relacy/test_result.hpp b/libs/relacy/relacy/test_result.hpp @@ -0,0 +1,111 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. Vyukov + * All rights reserved. + * This software is provided AS-IS with no warranty, either express or implied. 
diff --git a/libs/relacy/relacy/test_params.hpp b/libs/relacy/relacy/test_params.hpp
@@ -0,0 +1,90 @@
+/* Relacy Race Detector
+ * Copyright (c) 2008-2010, Dmitry S. Vyukov
+ * All rights reserved.
+ * This software is provided AS-IS with no warranty, either express or implied.
+ * This software is distributed under a license and may not be copied,
+ * modified or distributed except as expressly authorized under the
+ * terms of the license contained in the file LICENSE.TXT in this distribution.
+ */
+
+#ifndef RL_TEST_PARAMS_HPP
+#define RL_TEST_PARAMS_HPP
+#ifdef _MSC_VER
+#   pragma once
+#endif
+
+#include "base.hpp"
+#include "test_result.hpp"
+
+
+namespace rl
+{
+
+enum scheduler_type_e
+{
+    sched_random,
+    sched_bound,
+    sched_full,
+    sched_count,
+
+    random_scheduler_type = sched_random,
+    fair_context_bound_scheduler_type = sched_bound,
+    fair_full_search_scheduler_type = sched_full,
+    scheduler_type_count
+};
+
+inline char const* format(scheduler_type_e t)
+{
+    switch (t)
+    {
+    case sched_random: return "random scheduler";
+    case sched_bound: return "context bound scheduler";
+    case sched_full: return "full search scheduler";
+    default: break;
+    }
+    RL_VERIFY(false);
+    throw std::logic_error("invalid scheduler type");
+}
+
+
+struct test_params
+{
+    // input params
+    iteration_t iteration_count;
+    std::ostream* output_stream;
+    std::ostream* progress_stream;
+    unsigned progress_output_period;
+    bool collect_history;
+    bool output_history;
+    scheduler_type_e search_type;
+    unsigned context_bound;
+    unsigned execution_depth_limit;
+    string initial_state;
+
+    // output params
+    test_result_e test_result;
+    iteration_t stop_iteration;
+    string test_name;
+    string final_state;
+
+    test_params()
+    {
+        iteration_count = 1000;
+        output_stream = &std::cout;
+        progress_stream = &std::cout;
+        progress_output_period = 3;
+        collect_history = false;
+        output_history = false;
+        search_type = random_scheduler_type;
+        context_bound = 1;
+        execution_depth_limit = 2000;
+
+        test_result = test_result_success;
+        stop_iteration = 0;
+    }
+};
+
+
+}
+
+#endif
diff --git a/libs/relacy/relacy/test_result.hpp b/libs/relacy/relacy/test_result.hpp
@@ -0,0 +1,111 @@
+/* Relacy Race Detector
+ * Copyright (c) 2008-2010, Dmitry S. Vyukov
+ * All rights reserved.
+ * This software is provided AS-IS with no warranty, either express or implied.
+ * This software is distributed under a license and may not be copied,
+ * modified or distributed except as expressly authorized under the
+ * terms of the license contained in the file LICENSE.TXT in this distribution.
+ */
+
+#ifndef RL_TEST_RESULT_HPP
+#define RL_TEST_RESULT_HPP
+#ifdef _MSC_VER
+#   pragma once
+#endif
+
+#include "base.hpp"
+
+
+namespace rl
+{
+
+
+enum test_result_e
+{
+    test_result_success,
+    test_result_until_condition_hit,
+    test_result_inconsistent_test_suite,
+    test_result_user_assert_failed,
+    test_result_user_invariant_failed,
+    test_result_data_race,
+    test_result_access_to_freed_memory,
+    test_result_double_free,
+    test_result_memory_leak,
+    test_result_resource_leak,
+    test_result_unitialized_access,
+    test_result_deadlock,
+    test_result_livelock,
+
+    // mutex
+    test_result_recursion_on_nonrecursive_mutex,
+    test_result_unlocking_mutex_wo_ownership,
+    test_result_destroying_owned_mutex,
+    test_result_double_initialization_of_mutex,
+    test_result_usage_of_non_initialized_mutex,
+    test_result_mutex_write_to_read_upgrade,
+    test_result_mutex_read_to_write_upgrade,
+
+    // condvar
+    test_result_double_initialization_of_condvar,
+    test_result_usage_of_non_initialized_condvar,
+
+    // semaphore
+    test_result_double_initialization_of_semaphore,
+    test_result_usage_of_non_initialized_semaphore,
+
+    // event
+    test_result_double_initialization_of_event,
+    test_result_usage_of_non_initialized_event,
+
+    // dynamic thread
+    test_result_thread_signal,
+};
+
+
+inline char const* test_result_str(test_result_e r)
+{
+    switch (r)
+    {
+    case test_result_success: return "SUCCESS";
+    case test_result_until_condition_hit: return "UNTIL CONDITION HIT";
+    case test_result_inconsistent_test_suite: return "INCONSISTENT TEST SUITE";
+    case test_result_user_assert_failed: return "USER ASSERT FAILED";
+    case test_result_user_invariant_failed: return "USER INVARIANT FAILED";
+    case test_result_data_race: return "DATA RACE";
+    case test_result_access_to_freed_memory: return "ACCESS TO FREED MEMORY";
+    case test_result_double_free: return "DOUBLE FREE";
+    case test_result_memory_leak: return "MEMORY LEAK";
+    case test_result_resource_leak: return "RESOURCE LEAK";
+    case test_result_unitialized_access: return "ACCESS TO UNINITIALIZED VARIABLE";
+    case test_result_deadlock: return "DEADLOCK";
+    case test_result_livelock: return "LIVELOCK";
+
+    // mutex
+    case test_result_recursion_on_nonrecursive_mutex: return "RECURSION ON NON-RECURSIVE MUTEX";
+    case test_result_unlocking_mutex_wo_ownership: return "UNLOCKING MUTEX W/O OWNERSHIP";
+    case test_result_destroying_owned_mutex: return "DESTROYING OWNED MUTEX";
+    case test_result_double_initialization_of_mutex: return "DOUBLE INITIALIZATION OF MUTEX";
+    case test_result_usage_of_non_initialized_mutex: return "USAGE OF NON INITIALIZED MUTEX";
+    case test_result_mutex_write_to_read_upgrade: return "ATTEMPT TO UPGRADE EXCLUSIVE MUTEX OWNERSHIP TO SHARED";
+    case test_result_mutex_read_to_write_upgrade: return "ATTEMPT TO UPGRADE SHARED MUTEX OWNERSHIP TO EXCLUSIVE";
+
+    // condvar
+    case test_result_double_initialization_of_condvar: return "DOUBLE INITIALIZATION OF CONDITION VARIABLE";
+    case test_result_usage_of_non_initialized_condvar: return "USAGE OF NON INITIALIZED CONDITION VARIABLE";
+
+    // semaphore
+    case test_result_double_initialization_of_semaphore: return "DOUBLE INITIALIZATION OF SEMAPHORE";
+    case test_result_usage_of_non_initialized_semaphore: return "USAGE OF NON INITIALIZED SEMAPHORE";
+
+    // event
+    case test_result_double_initialization_of_event: return "DOUBLE INITIALIZATION OF EVENT";
+    case test_result_usage_of_non_initialized_event: return "USAGE OF NON INITIALIZED EVENT";
+
+    default: RL_VERIFY(false); return "UNKNOWN ERROR";
+    }
+}
+
+
+}
+
+#endif
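
test_params is the knob panel for a simulation run: a test binary fills it in and hands it to rl::simulate, and the constructor defaults (random scheduler, 1000 iterations) are what the parameterless overload used at the bottom of this commit falls back to. A sketch of the usual parameterized invocation, here pointed at the SPSC suite added below:

    rl::test_params params;
    params.search_type = rl::sched_bound; // fair context-bound scheduler
    params.context_bound = 2;             // bound on preemptions per thread
    params.iteration_count = 100000;
    rl::simulate< NonblockingFixedSPSCQueueTest >( params );
    // params.test_result and params.stop_iteration then hold the outcome
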
diff --git a/libs/relacy/relacy/test_suite.hpp b/libs/relacy/relacy/test_suite.hpp
@@ -0,0 +1,48 @@
+/* Relacy Race Detector
+ * Copyright (c) 2008-2010, Dmitry S. Vyukov
+ * All rights reserved.
+ * This software is provided AS-IS with no warranty, either express or implied.
+ * This software is distributed under a license and may not be copied,
+ * modified or distributed except as expressly authorized under the
+ * terms of the license contained in the file LICENSE.TXT in this distribution.
+ */
+
+#ifndef RL_TEST_SUITE_HPP
+#define RL_TEST_SUITE_HPP
+#ifdef _MSC_VER
+#   pragma once
+#endif
+
+#include "base.hpp"
+#include "test_result.hpp"
+
+
+namespace rl
+{
+
+
+template<
+    typename derived_t,
+    thread_id_t static_thread_count_param,
+    test_result_e result = test_result_success>
+struct test_suite : nocopy<>
+{
+    static thread_id_t const dynamic_thread_count = 0;
+
+    struct params
+    {
+        static thread_id_t const static_thread_count = static_thread_count_param;
+        static thread_id_t const dynamic_thread_count = derived_t::dynamic_thread_count;
+        static thread_id_t const thread_count = static_thread_count + dynamic_thread_count;
+        static test_result_e const expected_result = result;
+    };
+
+    void invariant() {}
+    void before() {}
+    void after() {}
+};
+
+
+}
+
+#endif
diff --git a/libs/relacy/relacy/thread.hpp b/libs/relacy/relacy/thread.hpp
@@ -0,0 +1,415 @@
+/* Relacy Race Detector
+ * Copyright (c) 2008-2010, Dmitry S. Vyukov
+ * All rights reserved.
+ * This software is provided AS-IS with no warranty, either express or implied.
+ * This software is distributed under a license and may not be copied,
+ * modified or distributed except as expressly authorized under the
+ * terms of the license contained in the file LICENSE.TXT in this distribution.
+ */
+
+#ifndef RL_THREAD_HPP
+#define RL_THREAD_HPP
+#ifdef _MSC_VER
+#   pragma once
+#endif
+
+#include "base.hpp"
+#include "context_base.hpp"
+#include "dyn_thread_ctx.hpp"
+#include "thread_base.hpp"
+#include "test_suite.hpp"
+#include "memory_order.hpp"
+#include "foreach.hpp"
+
+
+namespace rl
+{
+
+
+
+struct atomic_data;
+struct var_data;
+template<thread_id_t thread_count> struct atomic_data_impl;
+template<thread_id_t thread_count> struct var_data_impl;
+
+
+template<thread_id_t thread_count>
+struct thread_info : thread_info_base
+{
+    thread_info(thread_id_t index = 0)
+        : thread_info_base(index, acq_rel_order_)
+    {
+    }
+
+    void iteration_begin()
+    {
+        sync_object_.iteration_begin();
+        last_yield_ = 0;
+        dynamic_thread_func_ = 0;
+        dynamic_thread_param_ = 0;
+        for (thread_id_t j = 0; j != thread_count; ++j)
+        {
+            acq_rel_order_[j] = 0;
+        }
+        acq_rel_order_[index_] = 1;
+        temp_switch_from_ = -1;
+        saved_disable_preemption_ = -1;
+    }
+
+    thread_sync_object<thread_count> sync_object_;
+
+    timestamp_t acq_rel_order_ [thread_count];
+    timestamp_t acquire_fence_order_ [thread_count];
+    timestamp_t release_fence_order_ [thread_count];
+
+#ifdef RL_IMPROVED_SEQ_CST_FENCE
+    timestamp_t imp_seq_cst_order_ [thread_count];
+#endif
+
+    virtual void on_start()
+    {
+        RL_VERIFY(temp_switch_from_ == -1);
+        RL_VERIFY(saved_disable_preemption_ == -1);
+        sync_object_.on_start();
+    }
+
+    virtual void on_finish()
+    {
+        RL_VERIFY(temp_switch_from_ == -1);
+        RL_VERIFY(saved_disable_preemption_ == -1);
+        sync_object_.on_finish();
+    }
+
+    void atomic_thread_fence_acquire()
+    {
+        foreach<thread_count>(
+            acq_rel_order_,
+            acquire_fence_order_,
+            &assign_max);
+    }
+
+    void atomic_thread_fence_release()
+    {
+        foreach<thread_count>(
+            release_fence_order_,
+            acq_rel_order_,
+            &assign);
+    }
+
+    void atomic_thread_fence_acq_rel()
+    {
+        atomic_thread_fence_acquire();
+        atomic_thread_fence_release();
+    }
+
+    void atomic_thread_fence_seq_cst(timestamp_t* seq_cst_fence_order)
+    {
+#ifdef RL_IMPROVED_SEQ_CST_FENCE
+        foreach<thread_count>(acq_rel_order_, imp_seq_cst_order_, assign_max);
+#endif
+
+        atomic_thread_fence_acquire();
+
+        foreach<thread_count>(
+            acq_rel_order_,
+            seq_cst_fence_order,
+            &assign_max);
+
+        foreach<thread_count>(
+            seq_cst_fence_order,
+            acq_rel_order_,
+            &assign);
+
+        atomic_thread_fence_release();
+    }
+
+    virtual ~thread_info() {} // just to calm down gcc
+
+private:
+    thread_info(thread_info const&);
+    thread_info& operator = (thread_info const&);
+
+    virtual unsigned atomic_load_relaxed(atomic_data* RL_RESTRICT data)
+    {
+        return atomic_load<mo_relaxed, false>(data);
+    }
+
+    virtual unsigned atomic_load_acquire(atomic_data* RL_RESTRICT data)
+    {
+        return atomic_load<mo_acquire, false>(data);
+    }
+
+    virtual unsigned atomic_load_seq_cst(atomic_data* RL_RESTRICT data)
+    {
+        return atomic_load<mo_seq_cst, false>(data);
+    }
+
+    virtual unsigned atomic_load_relaxed_rmw(atomic_data* RL_RESTRICT data)
+    {
+        return atomic_load<mo_relaxed, true>(data);
+    }
+
+    virtual unsigned atomic_load_acquire_rmw(atomic_data* RL_RESTRICT data)
+    {
+        return atomic_load<mo_acquire, true>(data);
+    }
+
+    virtual unsigned atomic_load_seq_cst_rmw(atomic_data* RL_RESTRICT data)
+    {
+        return atomic_load<mo_seq_cst, true>(data);
+    }
+
+    virtual unsigned atomic_store_relaxed(atomic_data* RL_RESTRICT data)
+    {
+        return atomic_store<mo_relaxed, false>(data);
+    }
+
+    virtual unsigned atomic_store_release(atomic_data* RL_RESTRICT data)
+    {
+        return atomic_store<mo_release, false>(data);
+    }
+
+    virtual unsigned atomic_store_seq_cst(atomic_data* RL_RESTRICT data)
+    {
+        return atomic_store<mo_seq_cst, false>(data);
+    }
+
+    virtual unsigned atomic_rmw_relaxed(atomic_data* RL_RESTRICT data, bool& aba)
+    {
+        return atomic_rmw<mo_relaxed>(data, aba);
+    }
+
+    virtual unsigned atomic_rmw_acquire(atomic_data* RL_RESTRICT data, bool& aba)
+    {
+        return atomic_rmw<mo_acquire>(data, aba);
+    }
+
+    virtual unsigned atomic_rmw_release(atomic_data* RL_RESTRICT data, bool& aba)
+    {
+        return atomic_rmw<mo_release>(data, aba);
+    }
+
+    virtual unsigned atomic_rmw_acq_rel(atomic_data* RL_RESTRICT data, bool& aba)
+    {
+        return atomic_rmw<mo_acq_rel>(data, aba);
+    }
+
+    virtual unsigned atomic_rmw_seq_cst(atomic_data* RL_RESTRICT data, bool& aba)
+    {
+        return atomic_rmw<mo_seq_cst>(data, aba);
+    }
+
+    template<memory_order mo, bool rmw>
+    unsigned get_load_index(atomic_data_impl<thread_count>& var)
+    {
+        typedef typename atomic_data_impl<thread_count>::history_record history_t;
+
+        unsigned index = var.current_index_;
+        context& c = ctx();
+
+        if (false == val(rmw))
+        {
+            size_t const limit = c.is_random_sched() ? atomic_history_size - 1 : 1;
+            for (size_t i = 0; i != limit; ++i, --index)
+            {
+                history_t const& rec = var.history_[index % atomic_history_size];
+                if (false == rec.busy_)
+                    return (unsigned)-1; // access to uninitialized var
+
+                history_t const& prev = var.history_[(index - 1) % atomic_history_size];
+                if (prev.busy_ && prev.last_seen_order_[index_] <= last_yield_)
+                    break;
+
+                if (mo_seq_cst == val(mo) && rec.seq_cst_)
+                    break;
+
+                timestamp_t acq_rel_order =
+                    acq_rel_order_[rec.thread_id_];
+
+                if (acq_rel_order >= rec.acq_rel_timestamp_)
+                    break;
+
+                bool stop = false;
+                for (thread_id_t i = 0; i != thread_count; ++i)
+                {
+                    timestamp_t acq_rel_order2 = acq_rel_order_[i];
+                    if (acq_rel_order2 >= rec.last_seen_order_[i])
+                    {
+                        stop = true;
+                        break;
+                    }
+                }
+                if (stop)
+                    break;
+
+                if (0 == c.rand(2, sched_type_atomic_load))
+                    break;
+            }
+        }
+
+        if (false == var.history_[index % atomic_history_size].busy_)
+            return (unsigned)-1;
+
+        return index;
+    }
+
+    template<memory_order mo, bool rmw>
+    unsigned atomic_load(atomic_data* RL_RESTRICT data)
+    {
+        RL_VERIFY(mo_release != mo || rmw);
+        RL_VERIFY(mo_acq_rel != mo || rmw);
+
+        atomic_data_impl<thread_count>& var =
+            *static_cast<atomic_data_impl<thread_count>*>(data);
+
+        typedef typename atomic_data_impl<thread_count>::history_record history_t;
+
+        unsigned index = get_load_index<mo, rmw>(var);
+        if ((unsigned)-1 == index)
+            return (unsigned)-1;
+
+        index %= atomic_history_size;
+        history_t& rec = var.history_[index];
+        RL_VERIFY(rec.busy_);
+
+        own_acq_rel_order_ += 1;
+        rec.last_seen_order_[index_] = own_acq_rel_order_;
+
+        bool const synch =
+            (mo_acquire == mo
+            || mo_acq_rel == mo
+            || mo_seq_cst == mo);
+
+        timestamp_t* acq_rel_order = (synch ? acq_rel_order_ : acquire_fence_order_);
+
+        foreach<thread_count>(acq_rel_order, rec.acq_rel_order_, assign_max);
+
+        return index;
+    }
+
+    virtual unsigned atomic_init(atomic_data* RL_RESTRICT data)
+    {
+        atomic_data_impl<thread_count>& var =
+            *static_cast<atomic_data_impl<thread_count>*>(data);
+
+        typedef typename atomic_data_impl<thread_count>::history_record history_t;
+
+        unsigned const idx = ++var.current_index_ % atomic_history_size;
+        history_t& rec = var.history_[idx];
+
+        rec.busy_ = true;
+        rec.thread_id_ = index_;
+        rec.seq_cst_ = false;
+        rec.acq_rel_timestamp_ = 0;
+
+        foreach<thread_count>(rec.acq_rel_order_, assign_zero);
+
+        return idx;
+    }
+
+    template<memory_order mo, bool rmw>
+    unsigned atomic_store(atomic_data* RL_RESTRICT data)
+    {
+        RL_VERIFY(mo_consume != mo || rmw);
+        RL_VERIFY(mo_acquire != mo || rmw);
+        RL_VERIFY(mo_acq_rel != mo || rmw);
+
+        atomic_data_impl<thread_count>& var =
+            *static_cast<atomic_data_impl<thread_count>*>(data);
+
+        typedef typename atomic_data_impl<thread_count>::history_record history_t;
+
+        unsigned const idx = ++var.current_index_ % atomic_history_size;
+        history_t& rec = var.history_[idx];
+
+        rec.busy_ = true;
+        rec.thread_id_ = index_;
+        rec.seq_cst_ = (mo_seq_cst == mo);
+
+        own_acq_rel_order_ += 1;
+        rec.acq_rel_timestamp_ = own_acq_rel_order_;
+
+        foreach<thread_count>(rec.last_seen_order_, assign<(timestamp_t)-1>);
+
+        rec.last_seen_order_[index_] = own_acq_rel_order_;
+
+        unsigned const prev_idx = (var.current_index_ - 1) % atomic_history_size;
+        history_t& prev = var.history_[prev_idx];
+
+#ifdef RL_IMPROVED_SEQ_CST_FENCE
+        if (val(mo) == mo_release && val(rmw) == false)
+            foreach<thread_count>(imp_seq_cst_order_, prev.acq_rel_order_, assign_max);
+#endif
+
+        bool const synch =
+            (mo_release == mo
+            || mo_acq_rel == mo
+            || mo_seq_cst == mo);
+
+        bool const preserve =
+            prev.busy_ && (rmw || (index_ == prev.thread_id_));
+
+        timestamp_t* acq_rel_order = (synch ? acq_rel_order_ : release_fence_order_);
+
+        if (preserve)
+        {
+            foreach<thread_count>(rec.acq_rel_order_, prev.acq_rel_order_, assign);
+            foreach<thread_count>(rec.acq_rel_order_, acq_rel_order, assign_max);
+        }
+        else
+        {
+            foreach<thread_count>(rec.acq_rel_order_, acq_rel_order, assign);
+        }
+
+        return idx;
+    }
+
+    template<memory_order mo>
+    unsigned atomic_rmw(atomic_data* RL_RESTRICT data, bool& aba)
+    {
+        atomic_data_impl<thread_count>& var =
+            *static_cast<atomic_data_impl<thread_count>*>(data);
+        timestamp_t const last_seen = var.history_[var.current_index_ % atomic_history_size].last_seen_order_[index_];
+        aba = (last_seen > own_acq_rel_order_);
+        atomic_load<mo, true>(data);
+        unsigned result = atomic_store<mo, true>(data);
+
+#ifdef RL_IMPROVED_SEQ_CST_RMW
+        atomic_thread_fence_seq_cst(ctx_->seq_cst_fence_order_);
+#endif
+
+        return result;
+    }
+
+    virtual unpark_reason atomic_wait(atomic_data* RL_RESTRICT data, bool is_timed, bool allow_spurious_wakeup, debug_info_param info)
+    {
+        context& c = ctx();
+        atomic_data_impl<thread_count>& var =
+            *static_cast<atomic_data_impl<thread_count>*>(data);
+        unpark_reason const res = var.futex_ws_.park_current(c, is_timed, allow_spurious_wakeup, false, info);
+        if (res == unpark_reason_normal)
+            var.futex_sync_.acquire(this);
+        return res;
+    }
+
+    virtual thread_id_t atomic_wake(atomic_data* RL_RESTRICT data, thread_id_t count, debug_info_param info)
+    {
+        context& c = ctx();
+        atomic_data_impl<thread_count>& var =
+            *static_cast<atomic_data_impl<thread_count>*>(data);
+        thread_id_t unblocked = 0;
+        for (; count != 0; count -= 1, unblocked += 1)
+        {
+            if (var.futex_ws_.unpark_one(c, info) == false)
+                break;
+        }
+        if (unblocked != 0)
+            var.futex_sync_.release(this);
+        return unblocked;
+    }
+};
+
+
+}
+
+#endif
diff --git a/libs/relacy/relacy/thread_base.hpp b/libs/relacy/relacy/thread_base.hpp
@@ -0,0 +1,95 @@
+/* Relacy Race Detector
+ * Copyright (c) 2008-2010, Dmitry S. Vyukov
+ * All rights reserved.
+ * This software is provided AS-IS with no warranty, either express or implied.
+ * This software is distributed under a license and may not be copied,
+ * modified or distributed except as expressly authorized under the
+ * terms of the license contained in the file LICENSE.TXT in this distribution.
+ */
+
+#ifndef RL_THREAD_BASE_HPP
+#define RL_THREAD_BASE_HPP
+#ifdef _MSC_VER
+#   pragma once
+#endif
+
+#include "base.hpp"
+#include "context_base.hpp"
+//#include "test_suite.hpp"
+//#include "memory_order.hpp"
+//#include "foreach.hpp"
+
+
+namespace rl
+{
+
+
+
+struct atomic_data;
+struct var_data;
+template<thread_id_t thread_count> struct atomic_data_impl;
+template<thread_id_t thread_count> struct var_data_impl;
+
+
+class thread_info_base
+{
+public:
+    virtual void on_start() = 0;
+    virtual void on_finish() = 0;
+
+    virtual unsigned atomic_init(atomic_data* RL_RESTRICT data) = 0;
+
+    virtual unsigned atomic_load_relaxed(atomic_data* RL_RESTRICT data) = 0;
+    virtual unsigned atomic_load_acquire(atomic_data* RL_RESTRICT data) = 0;
+    virtual unsigned atomic_load_seq_cst(atomic_data* RL_RESTRICT data) = 0;
+    virtual unsigned atomic_load_relaxed_rmw(atomic_data* RL_RESTRICT data) = 0;
+    virtual unsigned atomic_load_acquire_rmw(atomic_data* RL_RESTRICT data) = 0;
+    virtual unsigned atomic_load_seq_cst_rmw(atomic_data* RL_RESTRICT data) = 0;
+
+    virtual unsigned atomic_store_relaxed(atomic_data* RL_RESTRICT data) = 0;
+    virtual unsigned atomic_store_release(atomic_data* RL_RESTRICT data) = 0;
+    virtual unsigned atomic_store_seq_cst(atomic_data* RL_RESTRICT data) = 0;
+
+    virtual unsigned atomic_rmw_relaxed(atomic_data* RL_RESTRICT data, bool& aba) = 0;
+    virtual unsigned atomic_rmw_acquire(atomic_data* RL_RESTRICT data, bool& aba) = 0;
+    virtual unsigned atomic_rmw_release(atomic_data* RL_RESTRICT data, bool& aba) = 0;
+    virtual unsigned atomic_rmw_acq_rel(atomic_data* RL_RESTRICT data, bool& aba) = 0;
+    virtual unsigned atomic_rmw_seq_cst(atomic_data* RL_RESTRICT data, bool& aba) = 0;
+
+    virtual unpark_reason atomic_wait(atomic_data* RL_RESTRICT data, bool is_timed, bool allow_spurious_wakeup, debug_info_param info) = 0;
+    virtual thread_id_t atomic_wake(atomic_data* RL_RESTRICT data, thread_id_t count, debug_info_param info) = 0;
+
+    virtual ~thread_info_base() {} // just to calm down gcc
+
+    fiber_t fiber_;
+    thread_id_t const index_;
+    context* ctx_;
+    timestamp_t* const acq_rel_order_;
+    timestamp_t last_yield_;
+    timestamp_t& own_acq_rel_order_;
+    unpark_reason unpark_reason_;
+    thread_id_t temp_switch_from_;
+    int saved_disable_preemption_;
+    int errno_;
+    void* (*dynamic_thread_func_)(void*);
+    void* dynamic_thread_param_;
+    //unsigned disable_history_;
+
+    thread_info_base(thread_id_t index, timestamp_t* acq_rel_order)
+        : index_(index)
+        , acq_rel_order_(acq_rel_order)
+        , own_acq_rel_order_(acq_rel_order[index])
+    {
+    }
+
+private:
+    thread_info_base(thread_info_base const&);
+    thread_info_base& operator = (thread_info_base const&);
+};
+
+
+
+
+}
+
+#endif
diff --git a/libs/relacy/relacy/thread_local.hpp b/libs/relacy/relacy/thread_local.hpp
@@ -0,0 +1,192 @@
+/* Relacy Race Detector
+ * Copyright (c) 2008-2010, Dmitry S. Vyukov
+ * All rights reserved.
+ * This software is provided AS-IS with no warranty, either express or implied.
+ * This software is distributed under a license and may not be copied,
+ * modified or distributed except as expressly authorized under the
+ * terms of the license contained in the file LICENSE.TXT in this distribution.
+ */
+
+#ifndef RL_THREAD_LOCAL_HPP
+#define RL_THREAD_LOCAL_HPP
+#ifdef _MSC_VER
+#   pragma once
+#endif
+
+#include "base.hpp"
+#include "signature.hpp"
+#include "context.hpp"
+
+
+namespace rl
+{
+
+
+class generic_thread_local : nocopy<>
+{
+public:
+    generic_thread_local()
+        : index_(-1)
+    {
+    }
+
+    ~generic_thread_local()
+    {
+    }
+
+    void init(void (*dtor)(intptr_t), debug_info_param info)
+    {
+        sign_.check(info);
+        //RL_ASSERT(index_ == -1);
+        index_ = ctx().thread_local_alloc(dtor);
+    }
+
+    void deinit(debug_info_param info)
+    {
+        sign_.check(info);
+        RL_ASSERT(index_ != -1);
+        ctx().thread_local_free(index_);
+        index_ = -1;
+    }
+
+    void set(intptr_t value, debug_info_param info)
+    {
+        sign_.check(info);
+        ctx().thread_local_set(index_, value);
+    }
+
+    intptr_t get(debug_info_param info)
+    {
+        sign_.check(info);
+        return ctx().thread_local_get(index_);
+    }
+
+private:
+    signature<0xf1724ae2> sign_;
+    int index_;
+};
+
+
+template<typename T>
+class thread_local_var;
+
+
+template<typename T>
+class thread_local_proxy
+{
+public:
+    thread_local_proxy(thread_local_var<T>& var, debug_info_param info)
+        : var_(var)
+        , info_(info)
+    {}
+
+    operator T () const
+    {
+        return var_.get(info_);
+    }
+
+    T operator -> () const
+    {
+        return var_.get(info_);
+    }
+
+    thread_local_proxy operator = (T value)
+    {
+        var_.set(value, info_);
+        return *this;
+    }
+
+private:
+    thread_local_var<T>& var_;
+    debug_info info_;
+    thread_local_proxy& operator = (thread_local_proxy const&);
+};
+
+
+template<typename T>
+class thread_local_var : generic_thread_local
+{
+public:
+    thread_local_var()
+        : ctx_seq_()
+    {
+    }
+
+    ~thread_local_var()
+    {
+    }
+
+    thread_local_proxy<T> operator () (debug_info_param info)
+    {
+        return thread_local_proxy<T>(*this, info);
+    }
+
+    void set(T value, debug_info_param info)
+    {
+        if (ctx_seq_ != ctx().get_ctx_seq())
+        {
+            ctx_seq_ = ctx().get_ctx_seq();
+            generic_thread_local::init(0, info);
+        }
+        generic_thread_local::set((intptr_t)value, info);
+    }
+
+    T get(debug_info_param info)
+    {
+        if (ctx_seq_ != ctx().get_ctx_seq())
+        {
+            ctx_seq_ = ctx().get_ctx_seq();
+            generic_thread_local::init(0, info);
+        }
+        return (T)generic_thread_local::get(info);
+    }
+
+private:
+    unsigned ctx_seq_;
+};
+
+
+inline unsigned long rl_TlsAlloc(debug_info_param info)
+{
+#ifndef RL_GC
+    //!!! may break on x64 platform
+    // TLS index is exactly DWORD (not DWORD_PTR), so one has to use indirection
+    return (unsigned long)new (info) thread_local_var<void*> ();
+#else
+    void* p = ctx().alloc(sizeof(thread_local_var<void*>), false, info);
+    new (p) thread_local_var<void*> ();
+    return (unsigned long)p;
+#endif
+}
+
+inline void rl_TlsFree(unsigned long slot, debug_info_param info)
+{
+#ifndef RL_GC
+    delete_impl((thread_local_var<void*>*)slot, info);
+#else
+    thread_local_var<void*>* t = (thread_local_var<void*>*)slot;
+    t->~thread_local_var<void*>();
+    ctx().free(t, false, info);
+#endif
+}
+
+inline void* rl_TlsGetValue(unsigned long slot, debug_info_param info)
+{
+    return ((thread_local_var<void*>*)slot)->get(info);
+}
+
+inline int rl_TlsSetValue(unsigned long slot, void* value, debug_info_param info)
+{
+    ((thread_local_var<void*>*)slot)->set(value, info);
+    return 1;
+}
+
+
+#define TlsAlloc() rl::rl_TlsAlloc($)
+#define TlsFree(slot) rl::rl_TlsFree((slot), $)
+#define TlsGetValue(slot) rl::rl_TlsGetValue((slot), $)
+#define TlsSetValue(slot, value) rl::rl_TlsSetValue((slot), (value), $)
+
+}
+
+#endif
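
thread_local_var above gives each simulated thread its own checked slot (lazily re-initialized whenever a new simulation context starts, via the get_ctx_seq comparison), and the rl_Tls* shims plus the TlsAlloc/TlsFree/TlsGetValue/TlsSetValue macros put the Win32 TLS API on top of it. Inside instrumented test code, where Relacy's $ macro supplies the debug info, usage reads like the native API:

    unsigned long slot = TlsAlloc();
    TlsSetValue(slot, (void*)42); // stored for the current simulated thread only
    void* v = TlsGetValue(slot);  // each thread sees its own value
    TlsFree(slot);
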
diff --git a/libs/relacy/relacy/thread_local_ctx.hpp b/libs/relacy/relacy/thread_local_ctx.hpp
@@ -0,0 +1,122 @@
+/* Relacy Race Detector
+ * Copyright (c) 2008-2010, Dmitry S. Vyukov
+ * All rights reserved.
+ * This software is provided AS-IS with no warranty, either express or implied.
+ * This software is distributed under a license and may not be copied,
+ * modified or distributed except as expressly authorized under the
+ * terms of the license contained in the file LICENSE.TXT in this distribution.
+ */
+
+#ifndef RL_THREAD_LOCAL_CTX_HPP
+#define RL_THREAD_LOCAL_CTX_HPP
+#ifdef _MSC_VER
+#   pragma once
+#endif
+
+#include "base.hpp"
+#include "test_params.hpp"
+
+
+namespace rl
+{
+
+
+struct thread_local_context_iface
+{
+    virtual int thread_local_alloc (void (*dtor)(intptr_t)) = 0;
+    virtual void thread_local_free (int index) = 0;
+    virtual void thread_local_set (int index, intptr_t value) = 0;
+    virtual intptr_t thread_local_get (int index) = 0;
+    virtual ~thread_local_context_iface () {} // to calm down g++
+};
+
+
+
+
+template<typename base_t, thread_id_t thread_count>
+class thread_local_contxt_impl : protected base_t
+{
+public:
+    thread_local_contxt_impl(thread_id_t thread_count_param, test_params& params)
+        : base_t(thread_count_param, params)
+    {
+    }
+
+    void iteration_begin()
+    {
+        base_t::iteration_begin();
+
+        for (size_t ent = 0; ent != entries_.size(); ent += 1)
+        {
+            for (size_t th = 0; th != thread_count; th += 1)
+            {
+                entries_[ent].value_[th] = 0;
+            }
+        }
+    }
+
+private:
+    struct entry
+    {
+        bool alive_;
+        intptr_t value_ [thread_count];
+        void (*dtor_) (intptr_t);
+    };
+
+    typename vector<entry>::type entries_;
+    using base_t::current_thread;
+
+    virtual int thread_local_alloc (void (*dtor)(intptr_t))
+    {
+        int index = (int)entries_.size();
+        entries_.resize(index + 1);
+        entry& ent = entries_[index];
+        ent.alive_ = true;
+        ent.dtor_ = dtor;
+        for (size_t i = 0; i != thread_count; ++i)
+        {
+            ent.value_[i] = 0;
+        }
+        return index;
+    }
+
+    virtual void thread_local_free (int index)
+    {
+        RL_VERIFY(index >= 0 && (size_t)index < entries_.size());
+        entry& ent = entries_[index];
+        RL_VERIFY(ent.alive_);
+        ent.alive_ = false;
+        if (ent.dtor_)
+        {
+            for (size_t i = 0; i != thread_count; ++i)
+            {
+                if (ent.value_[i])
+                {
+                    ent.dtor_(ent.value_[i]);
+                }
+            }
+        }
+    }
+
+    virtual void thread_local_set (int index, intptr_t value)
+    {
+        RL_VERIFY(index >= 0 && (size_t)index < entries_.size());
+        entry& ent = entries_[index];
+        RL_VERIFY(ent.alive_);
+        ent.value_[current_thread()] = value;
+    }
+
+    virtual intptr_t thread_local_get (int index)
+    {
+        RL_VERIFY(index >= 0 && (size_t)index < entries_.size());
+        entry& ent = entries_[index];
+        RL_VERIFY(ent.alive_);
+        return ent.value_[current_thread()];
+    }
+};
+
+
+
+}
+
+#endif
diff --git a/libs/relacy/relacy/var.hpp b/libs/relacy/relacy/var.hpp
@@ -0,0 +1,388 @@
+/* Relacy Race Detector
+ * Copyright (c) 2008-2010, Dmitry S. Vyukov
+ * All rights reserved.
+ * This software is provided AS-IS with no warranty, either express or implied.
+ * This software is distributed under a license and may not be copied,
+ * modified or distributed except as expressly authorized under the
+ * terms of the license contained in the file LICENSE.TXT in this distribution.
+ */
+
+#ifndef RL_VAR_HPP
+#define RL_VAR_HPP
+#ifdef _MSC_VER
+#   pragma once
+#endif
+
+#include "base.hpp"
+#include "context.hpp"
+#include "signature.hpp"
+#include "atomic_events.hpp"
+
+
+namespace rl
+{
+
+template<typename T>
+class var;
+
+
+
+template<typename T>
+class var_proxy_const
+{
+public:
+    var_proxy_const(var<T> const& v, debug_info_param info)
+        : var_(const_cast<var<T>&>(v))
+        , info_(info)
+    {
+    }
+
+    T load() const
+    {
+        return var_.load(info_);
+    }
+
+    operator T () const
+    {
+        return this->load();
+    }
+
+    T const operator -> () const
+    {
+        return this->load();
+    }
+
+protected:
+    var<T>& var_;
+    debug_info info_;
+
+private:
+    var_proxy_const& operator = (var_proxy_const const&);
+};
+
+
+
+
+template<typename T>
+class var_proxy : public var_proxy_const<T>
+{
+public:
+    typedef typename atomic_add_type<T>::type add_type;
+
+    var_proxy(var<T>& v, debug_info_param info)
+        : var_proxy_const<T>(v, info)
+    {
+    }
+
+    void store(T value)
+    {
+        this->var_.store(value, this->info_);
+    }
+
+    template<typename Y>
+    T operator = (var_proxy_const<Y> const& v)
+    {
+        Y y = v.load();
+        T t = y;
+        store(t);
+        return t;
+    }
+
+    T operator = (var_proxy<T> const& v)
+    {
+        T t = v.load();
+        store(t);
+        return t;
+    }
+
+    T operator = (T value)
+    {
+        store(value);
+        return value;
+    }
+
+    T operator -> ()
+    {
+        return this->load();
+    }
+
+    T operator ++ (int)
+    {
+        T v = this->load();
+        T y = ++v;
+        this->store(y);
+        return v;
+    }
+
+    T operator -- (int)
+    {
+        T v = this->load();
+        T y = --v;
+        this->store(y);
+        return v;
+    }
+
+    T operator ++ ()
+    {
+        T v = this->load();
+        this->store(++v);
+        return v;
+    }
+
+    T operator -- ()
+    {
+        T v = this->load();
+        this->store(--v);
+        return v;
+    }
+
+    T operator += (add_type value)
+    {
+        T v = this->load();
+        v += value;
+        this->store(v);
+        return v;
+    }
+
+    T operator -= (add_type value)
+    {
+        T v = this->load();
+        v -= value;
+        this->store(v);
+        return v;
+    }
+
+    T operator &= (T value)
+    {
+        T v = this->load();
+        v &= value;
+        this->store(v);
+        return v;
+    }
+
+    T operator |= (T value)
+    {
+        T v = this->load();
+        v |= value;
+        this->store(v);
+        return v;
+    }
+
+    T operator ^= (T value)
+    {
+        T v = this->load();
+        v ^= value;
+        this->store(v);
+        return v;
+    }
+};
+
+
+
+
+template<typename T>
+struct var_event
+{
+    debug_info var_info_;
+    var<T> const* var_addr_;
+    T value_;
+    bool load_;
+
+    template<typename Y>
+    struct map_type
+    {
+        typedef T result;
+    };
+
+    template<typename Y>
+    struct map_type<Y*>
+    {
+        typedef void* result;
+    };
+
+    void output(std::ostream& s) const
+    {
+        s << "<" << std::hex << var_addr_ << std::dec << "> "
"load" : "store") << ", value=" << (typename map_type<T>::result)value_; + } +}; + + + + +template<typename T> +class var +{ +public: + var() + { + value_ = 0; + initialized_ = false; + data_ = ctx().var_ctor(); + } + + var(T value) + { + init(value); + } + + var(var const& r) + { + init(r.load($)); + } + + ~var() + { + sign_.check($); + ctx().var_dtor(data_); + } + + var_proxy_const<T> operator () (debug_info_param info) const + { + return var_proxy_const<T>(*this, info); + } + + var_proxy<T> operator () (debug_info_param info) + { + return var_proxy<T>(*this, info); + } + +private: + T value_; + bool initialized_; + + var_data* data_; + + signature<123456789> sign_; + friend class var_proxy<T>; + friend class var_proxy_const<T>; + + void init(T value) + { + context& c = ctx(); + initialized_ = true; + value_ = value; + data_ = ctx().var_ctor(); + data_->init(*c.threadx_); + } + + T load(debug_info_param info) const + { + context& c = ctx(); + sign_.check(info); + + if (false == initialized_) + { + RL_HIST(var_event<T>) {RL_INFO, this, T(), true} RL_HIST_END(); + RL_ASSERT_IMPL(false, test_result_unitialized_access, "", info); + } + + if (false == c.invariant_executing) + { + if (false == data_->load(*c.threadx_)) + { + RL_HIST(var_event<T>) {RL_INFO, this, T(), true} RL_HIST_END(); + RL_ASSERT_IMPL(false, test_result_data_race, "data race detected", info); + } + + T const v = value_; + + RL_HIST(var_event<T>) {RL_INFO, this, v, true} RL_HIST_END(); + + return v; + } + else + { + return value_; + } + } + + void store(T v, debug_info_param info) + { + context& c = ctx(); + RL_VERIFY(false == c.invariant_executing); + sign_.check(info); + + if (initialized_) + { + if (false == data_->store(*c.threadx_)) + { + RL_HIST(var_event<T>) {RL_INFO, this, T(), false} RL_HIST_END(); + RL_ASSERT_IMPL(false, test_result_data_race, "data race detected", info); + } + } + else + { + initialized_ = true; + data_->init(*c.threadx_); + } + + value_ = v; + + RL_HIST(var_event<T>) {RL_INFO, this, v, false} RL_HIST_END(); + } + + var& operator = (var const& r); +}; + + + + +template<thread_id_t thread_count> +struct var_data_impl : var_data +{ + typedef thread_info<thread_count> thread_info_t; + + timestamp_t load_acq_rel_timestamp_ [thread_count]; + timestamp_t store_acq_rel_timestamp_ [thread_count]; + + var_data_impl() + { + foreach<thread_count>(load_acq_rel_timestamp_, assign_zero); + foreach<thread_count>(store_acq_rel_timestamp_, assign_zero); + } + + virtual void init(thread_info_base& th) + { + th.own_acq_rel_order_ += 1; + store_acq_rel_timestamp_[th.index_] = th.own_acq_rel_order_; + } + + virtual bool store(thread_info_base& th) + { + for (thread_id_t i = 0; i != thread_count; ++i) + { + if (th.acq_rel_order_[i] < store_acq_rel_timestamp_[i]) + return false; + if (th.acq_rel_order_[i] < load_acq_rel_timestamp_[i]) + return false; + } + + th.own_acq_rel_order_ += 1; + store_acq_rel_timestamp_[th.index_] = th.own_acq_rel_order_; + return true; + } + + virtual bool load(thread_info_base& th) + { + for (thread_id_t i = 0; i != thread_count; ++i) + { + if (th.acq_rel_order_[i] < store_acq_rel_timestamp_[i]) + return false; + } + + th.own_acq_rel_order_ += 1; + load_acq_rel_timestamp_[th.index_] = th.own_acq_rel_order_; + return true; + } + + virtual ~var_data_impl() {} // just to calm down gcc +}; + + + +} + + + +#endif diff --git a/libs/relacy/relacy/volatile.hpp b/libs/relacy/relacy/volatile.hpp @@ -0,0 +1,24 @@ +/* Relacy Race Detector + * Copyright (c) 2008-2010, Dmitry S. 
diff --git a/libs/relacy/relacy/volatile.hpp b/libs/relacy/relacy/volatile.hpp
@@ -0,0 +1,24 @@
+/* Relacy Race Detector
+ * Copyright (c) 2008-2010, Dmitry S. Vyukov
+ * All rights reserved.
+ * This software is provided AS-IS with no warranty, either express or implied.
+ * This software is distributed under a license and may not be copied,
+ * modified or distributed except as expressly authorized under the
+ * terms of the license contained in the file LICENSE.TXT in this distribution.
+ */
+
+#ifndef RL_VOLATILE_HPP
+#define RL_VOLATILE_HPP
+#ifdef _MSC_VER
+#   pragma once
+#endif
+
+#include "base.hpp"
+
+
+namespace rl
+{
+
+}
+
+#endif
diff --git a/libs/relacy/relacy/waitset.hpp b/libs/relacy/relacy/waitset.hpp
@@ -0,0 +1,198 @@
+/* Relacy Race Detector
+ * Copyright (c) 2008-2010, Dmitry S. Vyukov
+ * All rights reserved.
+ * This software is provided AS-IS with no warranty, either express or implied.
+ * This software is distributed under a license and may not be copied,
+ * modified or distributed except as expressly authorized under the
+ * terms of the license contained in the file LICENSE.TXT in this distribution.
+ */
+
+#ifndef RL_WAITSET_HPP
+#define RL_WAITSET_HPP
+#ifdef _MSC_VER
+#   pragma once
+#endif
+
+#include "base.hpp"
+#include "thread_base.hpp"
+#include "context_base.hpp"
+
+
+namespace rl
+{
+
+
+template<thread_id_t thread_count>
+class waitset
+{
+public:
+    waitset()
+    {
+        size_ = 0;
+    }
+
+    unpark_reason park_current(context& c,
+        bool is_timed,
+        bool allow_spurious_wakeup,
+        bool do_switch,
+        debug_info_param info)
+    {
+        RL_VERIFY(size_ < thread_count);
+        thread_info_base* th = c.threadx_;
+        thread_desc desc = {th, 0, 0, 0, false, do_switch};
+        set_[size_] = desc;
+        size_ += 1;
+        unpark_reason reason = c.park_current_thread(is_timed, allow_spurious_wakeup, do_switch, info);
+        if (reason == unpark_reason_normal)
+        {
+            if (do_switch)
+                RL_VERIFY(c.threadx_->temp_switch_from_ != -1);
+            else
+                RL_VERIFY(c.threadx_->temp_switch_from_ == -1);
+        }
+        else
+        {
+            remove(th);
+        }
+        return reason;
+    }
+
+    static unpark_reason park_current(context& c,
+        waitset** ws,
+        win_waitable_object** wo,
+        size_t count,
+        bool wait_all,
+        bool is_timed,
+        bool do_switch,
+        debug_info_param info)
+    {
+        thread_info_base* th = c.threadx_;
+        thread_desc desc = {th, (unsigned)count, ws, wo, wait_all, do_switch};
+        for (unsigned wsi = 0; wsi != count; ++wsi)
+        {
+            RL_VERIFY(ws[wsi]->size_ < thread_count);
+            ws[wsi]->set_[ws[wsi]->size_] = desc;
+            ws[wsi]->size_ += 1;
+        }
+        unpark_reason reason = c.park_current_thread(is_timed, false, do_switch, info);
+        if (reason == unpark_reason_normal)
+        {
+            if (do_switch)
+                RL_VERIFY(c.threadx_->temp_switch_from_ != -1);
+            else
+                RL_VERIFY(c.threadx_->temp_switch_from_ == -1);
+        }
+        else
+        {
+            remove(th, ws, (unsigned)count);
+        }
+        return reason;
+    }
+
+    bool unpark_one(context& c, debug_info_param info)
+    {
+        if (0 == size_)
+            return false;
+        //!!! too high pressure on full sched
+        thread_id_t idx = c.rand(size_, sched_type_user);
+        if (try_remove(c, idx, info))
+            return true;
+        for (idx = 0; idx != size_; idx += 1)
+        {
+            if (try_remove(c, idx, info))
+                return true;
+        }
+        return false;
+    }
+
+    thread_id_t unpark_all(context& c, debug_info_param info)
+    {
+        thread_id_t cnt = 0;
+        for (thread_id_t idx = 0; idx != size_; idx += 1)
+        {
+            if (try_remove(c, idx, info))
+            {
+                cnt += 1;
+                idx -= 1;
+            }
+        }
+        return cnt;
+    }
+
+    thread_id_t size() const
+    {
+        return size_;
+    }
+
+    operator bool () const
+    {
+        return 0 != size_;
+    }
+
+private:
+    struct thread_desc
+    {
+        thread_info_base* th_;
+        unsigned count_; // 0 - wfso, !0 - wfmo
+        waitset** ws_; // 0 - wfso, !0 - wfmo
+        win_waitable_object** wo_; // 0 - wfso, !0 - wfmo
+        bool wait_all_;
+        bool do_switch_;
+    };
+
+    thread_desc set_ [thread_count];
+    thread_id_t size_;
+
+    bool try_remove(context& c, thread_id_t const idx, debug_info_param info)
+    {
+        RL_VERIFY(idx < size_);
+        thread_desc const& d = set_[idx];
+        if (d.count_ != 0 && d.wait_all_ == true)
+        {
+            for (size_t i = 0; i != d.count_; i += 1)
+            {
+                if (d.wo_[i]->is_signaled(info) == false)
+                    return false;
+            }
+        }
+        size_t const tid = d.th_->index_;
+        bool const do_switch = d.do_switch_;
+        if (d.ws_)
+            remove(d.th_, d.ws_, d.count_);
+        else
+            remove(d.th_);
+        c.unpark_thread(tid, do_switch, info);
+        return true;
+    }
+
+    void remove(thread_info_base* th)
+    {
+        thread_id_t size = size_;
+        thread_id_t i = 0;
+        for (; i != size; ++i)
+        {
+            if (set_[i].th_ == th)
+                break;
+        }
+        RL_VERIFY(i != size);
+        for (thread_id_t j = i + 1; j != size; ++j)
+        {
+            set_[j - 1] = set_[j];
+        }
+        size_ -= 1;
+    }
+
+    static void remove(thread_info_base* th, waitset** ws, unsigned count)
+    {
+        for (unsigned wsi = 0; wsi != count; ++wsi)
+        {
+            ws[wsi]->remove(th);
+        }
+    }
+};
+
+
+}
+
+
+#endif
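
waitset is the building block for every blocking primitive in the emulation: park_current records the calling thread in the set and asks the context to deschedule it, and unpark_one deliberately wakes a randomly chosen waiter (c.rand(size_, sched_type_user)) so the scheduler explores every legal wakeup order. A hypothetical wrapper shows the intended shape (illustrative only; the names are invented and not part of the library):

    // a minimal event-like object built on waitset
    template<rl::thread_id_t N>
    struct notify_slot
    {
        rl::waitset<N> ws_;

        rl::unpark_reason wait(rl::context& c, rl::debug_info_param info)
        {
            // not timed, spurious wakeups allowed, no forced context switch
            return ws_.park_current(c, false, true, false, info);
        }

        void signal(rl::context& c, rl::debug_info_param info)
        {
            ws_.unpark_one(c, info); // wakes one randomly chosen waiter, if any
        }
    };
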
diff --git a/libs/relacy/relacy/windows.h b/libs/relacy/relacy/windows.h
@@ -0,0 +1,21 @@
+/* Relacy Race Detector
+ * Copyright (c) 2008-2010, Dmitry S. Vyukov
+ * All rights reserved.
+ * This software is provided AS-IS with no warranty, either express or implied.
+ * This software is distributed under a license and may not be copied,
+ * modified or distributed except as expressly authorized under the
+ * terms of the license contained in the file LICENSE.TXT in this distribution.
+ */
+
+#ifndef RL_WINDOWS_IFACE_HPP
+#define RL_WINDOWS_IFACE_HPP
+#ifdef _MSC_VER
+#   pragma once
+#endif
+
+
+#include "relacy.hpp"
+#include "stdlib/windows.hpp"
+
+
+#endif
diff --git a/relacy.cc b/relacy.cc
@@ -0,0 +1,56 @@
+#include "libs/relacy/relacy/pch.hpp"
+#include "libs/relacy/relacy/relacy_std.hpp"
+#include "intrinsics.h"
+#include "nonblocking_fixed_spsc_queue.h"
+
+#define QUEUE_SIZE 64
+#define ITERS 256
+
+struct NonblockingFixedSPSCQueueTest : rl::test_suite< NonblockingFixedSPSCQueueTest, 2 > {
+	NonblockingFixedSPSCQueue< int, QUEUE_SIZE > q;
+
+	void thread( unsigned int thread_index ) {
+		if( thread_index == 0 ) {
+			for( int i = 0; i < ITERS; i++ ) {
+				int x;
+				while( !q.dequeue( &x ) ) continue;
+				RL_ASSERT( x == i );
+			}
+		}
+		else {
+			for( int i = 0; i < ITERS; i++ ) {
+				while( !q.enqueue( i ) ) continue;
+			}
+		}
+	}
+};
+
+struct NonblockingFixedSPSCQueueTestNoCopy : rl::test_suite< NonblockingFixedSPSCQueueTestNoCopy, 2 > {
+	NonblockingFixedSPSCQueue< int, QUEUE_SIZE > q;
+
+	void thread( unsigned int thread_index ) {
+		if( thread_index == 0 ) {
+			for( int i = 0; i < ITERS; i++ ) {
+				for( ;; ) {
+					int * x = q.dequeue_acquire();
+					if( x == NULL ) continue;
+					RL_ASSERT( *x == i );
+					q.dequeue_release();
+					break;
+				}
+			}
+		}
+		else {
+			for( int i = 0; i < ITERS; i++ ) {
+				while( !q.enqueue( i ) ) continue;
+			}
+		}
+	}
+};
+
+int main() {
+	rl::simulate< NonblockingFixedSPSCQueueTest >();
+	rl::simulate< NonblockingFixedSPSCQueueTestNoCopy >();
+
+	return 0;
+}
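
The queue header itself, nonblocking_fixed_spsc_queue.h, is not part of this diff; the interface the two suites exercise, inferred from the calls above, is roughly:

	// inferred shape of the queue under test (a sketch, not the actual header)
	template< typename T, size_t N >
	class NonblockingFixedSPSCQueue {
	public:
		bool enqueue( const T & x );  // false when the queue is full
		bool dequeue( T * x );        // copies the head out; false when empty
		T * dequeue_acquire();        // NULL when empty; exposes the head in place
		void dequeue_release();       // releases the slot returned by dequeue_acquire
	};

With one producer and one consumer per suite, the RL_ASSERT( x == i ) checks verify FIFO order and exactly-once delivery, for both the copying and the in-place dequeue paths, across every interleaving Relacy's scheduler explores.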