// Sirikata
/* Sirikata Utilities -- Sirikata Synchronization Utilities
 *  AtomicTypes.hpp
 *
 *  Copyright (c) 2008, Daniel Reiter Horn
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are
 *  met:
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in
 *      the documentation and/or other materials provided with the
 *      distribution.
 *    * Neither the name of Sirikata nor the names of its contributors may
 *      be used to endorse or promote products derived from this software
 *      without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 *  IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *  TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *  PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
 *  OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 *  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 *  PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 *  PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _SIRIKATA_ATOMIC_TYPES_HPP_
#define _SIRIKATA_ATOMIC_TYPES_HPP_

#include <cstddef>             // size_t, used by AtomicValue's alignment math

#ifdef _WIN32
#include <windows.h>           // Interlocked* intrinsics (LONG/LONGLONG types)
#elif defined(__APPLE__)
#include <libkern/OSAtomic.h>  // OSAtomic* primitives
#endif

namespace Sirikata {

#ifdef _WIN32
/** Size-based dispatcher that maps atomic add/inc/dec onto the Win32
 *  Interlocked* intrinsic matching the operand width (4 or 8 bytes).
 *  The unspecialized template is intentionally empty so that using an
 *  unsupported operand size fails at compile time. */
template <int size> class SizedAtomicValue {
};

template<> class SizedAtomicValue<4> {
public:
    /// Atomically adds \a other to *scalar and returns the NEW value.
    /// InterlockedExchangeAdd returns the OLD value, hence the "other +".
    /// NOTE(review): int32 is assumed to be a project-wide typedef — confirm.
    template<typename T> static T add(volatile T* scalar, T other) {
        return other + (T)InterlockedExchangeAdd((volatile LONG*)scalar, (int32)other);
    }
    /// Atomically increments *scalar and returns the new value.
    template<typename T> static T inc(volatile T* scalar) {
        return (T)InterlockedIncrement((volatile LONG*)scalar);
    }
    /// Atomically decrements *scalar and returns the new value.
    template<typename T> static T dec(volatile T* scalar) {
        return (T)InterlockedDecrement((volatile LONG*)scalar);
    }
};

template<> class SizedAtomicValue<8> {
public:
    /// Atomically adds \a other to *scalar and returns the NEW value
    /// (InterlockedExchangeAdd64 returns the old value).
    template<typename T> static T add(volatile T* scalar, T other) {
        return other + (T)InterlockedExchangeAdd64((volatile LONGLONG*)scalar, (LONGLONG)other);
    }
    /// Atomically increments *scalar and returns the new value.
    template<typename T> static T inc(volatile T* scalar) {
        return (T)InterlockedIncrement64((volatile LONGLONG*)scalar);
    }
    /// Atomically decrements *scalar and returns the new value.
    template<typename T> static T dec(volatile T* scalar) {
        return (T)InterlockedDecrement64((volatile LONGLONG*)scalar);
    }
};
#elif defined(__APPLE__)
/** Size-based dispatcher onto the Darwin OSAtomic* primitives.
 *  The unspecialized template is intentionally empty so that using an
 *  unsupported operand size fails at compile time. */
template<int size> class SizedAtomicValue {
};

template<> class SizedAtomicValue<4> {
public:
    /// Atomically adds \a other to *scalar; OSAtomicAdd32 returns the new value.
    template <typename T> static T add(volatile T* scalar, T other) {
        return (T)OSAtomicAdd32((int32)other, (int32*)scalar);
    }
    /// Atomically increments *scalar and returns the new value.
    template <typename T> static T inc(volatile T* scalar) {
        return (T)OSAtomicIncrement32((int32*)scalar);
    }
    /// Atomically decrements *scalar and returns the new value.
    template <typename T> static T dec(volatile T* scalar) {
        return (T)OSAtomicDecrement32((int32*)scalar);
    }
};

/** 8-byte specialization. The 64-bit OSAtomic calls require hardware
 *  support for 64-bit atomics (available on all supported Apple targets). */
template<> class SizedAtomicValue<8> {
public:
    /// Atomically adds \a other to *scalar; OSAtomicAdd64 returns the new value.
    template <typename T> static T add(volatile T* scalar, T other) {
        return (T)OSAtomicAdd64((int64)other, (int64*)scalar);
    }
    /// Atomically increments *scalar and returns the new value.
    template <typename T> static T inc(volatile T* scalar) {
        return (T)OSAtomicIncrement64((int64*)scalar);
    }
    /// Atomically decrements *scalar and returns the new value.
    template <typename T> static T dec(volatile T* scalar) {
        return (T)OSAtomicDecrement64((int64*)scalar);
    }
};
#else
/** Fallback for gcc/clang: the __sync builtins handle every operand size the
 *  target supports, so a single unspecialized template suffices. All three
 *  operations return the NEW value, matching the other platform branches. */
template<int size> class SizedAtomicValue {
public:
    /// Atomically adds \a other to *scalar and returns the new value.
    template <typename T> static T add(volatile T* scalar, T other) {
        return __sync_add_and_fetch(scalar, other);
    }
    /// Atomically increments *scalar and returns the new value.
    template <typename T> static T inc(volatile T* scalar) {
        return __sync_add_and_fetch(scalar, 1);
    }
    /// Atomically decrements *scalar and returns the new value.
    template <typename T> static T dec(volatile T* scalar) {
        return __sync_sub_and_fetch(scalar, 1);
    }
};
#endif

#ifdef _WIN32
#pragma warning( push )
#pragma warning (disable : 4312)  // pointer truncation in casts below
#pragma warning (disable : 4197)  // top-level volatile cast warnings
#endif

/** A scalar whose increment/decrement/add operations are atomic.
 *
 *  Storage is an over-sized char buffer; getThisAlignedAddress() rounds the
 *  buffer address up to the next multiple of the operand size so the atomic
 *  intrinsics always see a naturally aligned value. Plain reads/assignments
 *  (operator=, operator T, read()) are simple volatile accesses, not atomic
 *  read-modify-write operations. */
template <typename T>
class AtomicValue {
private:
    // sizeof(T) bytes of payload plus enough slack to align to a
    // 4/2/8/16-byte boundary depending on sizeof(T).
    volatile char mMemory[sizeof(T)+(sizeof(T)==4?4:(sizeof(T)==2?2:(sizeof(T)==8?8:16)))];

    /// Rounds \a data up to the alignment boundary appropriate for sizeof(T).
    static volatile T* getThisAlignedAddress(volatile char* data) {
        size_t bitandammt = sizeof(T)==4?3:(sizeof(T)==2?1:(sizeof(T)==8?7:15));
        size_t notbitandammt = ~bitandammt;
        // BUGFIX: "&not" had been mangled into the "¬" character by an
        // HTML-entity round trip; this is a plain bitwise AND with the mask.
        return (volatile T*)((((size_t)data)+bitandammt)&notbitandammt);
    }
    /// Const overload of the alignment helper.
    static volatile const T* getThisAlignedAddress(volatile const char* data) {
        size_t bitandammt = sizeof(T)==4?3:(sizeof(T)==2?1:(sizeof(T)==8?7:15));
        size_t notbitandammt = ~bitandammt;
        return (volatile const T*)((((size_t)data)+bitandammt)&notbitandammt);
    }
public:
    /// Default constructor leaves the value uninitialized (matches POD semantics).
    AtomicValue() {
    }
    /// Initializes the stored value to \a other (non-atomic store).
    explicit AtomicValue(T other) {
        *(T*)getThisAlignedAddress(mMemory) = other;
    }
    /// Copy constructor: non-atomic read of \a other, non-atomic store here.
    AtomicValue(const AtomicValue& other) {
        *(T*)getThisAlignedAddress(mMemory) = *(T*)getThisAlignedAddress(other.mMemory);
    }
    /// Plain (non-atomic) assignment from a raw value.
    const AtomicValue<T>& operator =(T other) {
        *(T*)getThisAlignedAddress(mMemory) = other;
        return *this;
    }
    /// Plain (non-atomic) assignment from another AtomicValue.
    const AtomicValue& operator =(const AtomicValue& other) {
        *(T*)getThisAlignedAddress(mMemory) = *(T*)getThisAlignedAddress(other.mMemory);
        return *this;
    }
    bool operator ==(T other) const {
        return *(T*)getThisAlignedAddress(mMemory) == other;
    }
    bool operator ==(const AtomicValue& other) const {
        return *(T*)getThisAlignedAddress(mMemory) == *(T*)getThisAlignedAddress(other.mMemory);
    }
    /// Implicit conversion: plain volatile read of the current value.
    operator T () const {
        return *(T*)getThisAlignedAddress(mMemory);
    }
    /// Explicit plain volatile read of the current value.
    T read() const {
        return *(T*)getThisAlignedAddress(mMemory);
    }
    /// Atomic add; returns the NEW value.
    T operator +=(const T& other) {
        return SizedAtomicValue<sizeof(T)>::add(getThisAlignedAddress(mMemory), other);
    }
    /// Atomic subtract; returns the NEW value. Implemented as += of the
    /// two's-complement negation to avoid unary minus on unsigned T.
    T operator -=(const T& other) {
        T temp = 0;
        temp -= other; // to avoid unsigned unary operator problems, just wrap
        return *this += temp;
    }
    /// Atomic pre-increment; returns the new value.
    T operator ++() {
        return (T)SizedAtomicValue<sizeof(T)>::inc(getThisAlignedAddress(mMemory));
    }
    /// Atomic pre-decrement; returns the new value.
    T operator --() {
        return (T)SizedAtomicValue<sizeof(T)>::dec(getThisAlignedAddress(mMemory));
    }
    /// Atomic post-increment; returns the old value.
    T operator++(int) {
        return (++*this) - (T)1;
    }
    /// Atomic post-decrement; returns the old value.
    T operator--(int) {
        return (--*this) + (T)1;
    }
};

/** Atomic pointer compare-and-swap: if *target == comperand, stores exchange
 *  into *target. Returns true iff the swap was performed. */
template <class Node>
inline bool compare_and_swap(volatile Node*volatile *target, volatile Node *comperand, volatile Node * exchange){
#ifdef _WIN32
    return InterlockedCompareExchangePointer((volatile PVOID*)target, (volatile PVOID)exchange, (volatile PVOID)comperand)==comperand;
#else
#ifdef __APPLE__
    // sizeof(exchange) is the pointer width, so this selects the 32- vs
    // 64-bit CAS at compile time (the branch is constant-folded).
    if (sizeof(exchange)==4) {
        return OSAtomicCompareAndSwap32((int32_t)comperand, (int32_t)exchange, (int32_t*)target);
    }else {
        return OSAtomicCompareAndSwap64((int64_t)comperand, (int64_t)exchange, (int64_t*)target);
    }
#else
    return __sync_bool_compare_and_swap (target, comperand, exchange);
#endif
#endif
}

#ifdef _WIN32
#pragma warning( pop )
#endif

} // namespace Sirikata

#endif //_SIRIKATA_ATOMIC_TYPES_HPP_