V8 Project
v8::base Namespace Reference

Namespaces

 anonymous_namespace{condition-variable-unittest.cc}
 
 anonymous_namespace{flags-unittest.cc}
 
 anonymous_namespace{platform-posix.cc}
 
 anonymous_namespace{platform-unittest.cc}
 
 anonymous_namespace{platform-win32.cc}
 
 anonymous_namespace{semaphore-unittest.cc}
 
 anonymous_namespace{time-unittest.cc}
 
 bits
 
 internal
 

Classes

struct  AtomicOps_x86CPUFeatureStruct
 
class  FINAL
 
struct  MagicNumbersForDivision
 
struct  LeakyInstanceTrait
 
struct  StaticallyAllocatedInstanceTrait
 
struct  DynamicallyAllocatedInstanceTrait
 
struct  DefaultConstructTrait
 
struct  DefaultCreateTrait
 
struct  ThreadSafeInitOnceTrait
 
struct  SingleThreadInitOnceTrait
 
struct  LazyInstanceImpl
 
struct  LazyStaticInstance
 
struct  LazyInstance
 
struct  LazyDynamicInstance
 
struct  OneArgFunction
 
class  PosixMemoryMappedFile
 
class  TimezoneCache
 
class  Win32Time
 
class  Win32MemoryMappedFile
 
class  OS
 
class  VirtualMemory
 
class  Thread
 
struct  CreateSemaphoreTrait
 
struct  LazySemaphore
 
class  KernelTimestampClock
 
class  RandomNumberGeneratorTest
 

Typedefs

typedef char Atomic8
 
typedef int32_t Atomic32
 
typedef intptr_t AtomicWord
 
typedef char __tsan_atomic8
 
typedef short __tsan_atomic16
 
typedef int __tsan_atomic32
 
typedef long __tsan_atomic64
 
typedef char __tsan_atomic128
 
typedef MagicNumbersForDivision< uint32_t > M32
 
typedef MagicNumbersForDivision< uint64_t > M64
 
typedef AtomicWord OnceType
 
typedef void(* NoArgFunction) ()
 
typedef void(* PointerArgFunction) (void *arg)
 
typedef LazyStaticInstance< ConditionVariable, DefaultConstructTrait< ConditionVariable >, ThreadSafeInitOnceTrait >::type LazyConditionVariable
 
typedef LazyStaticInstance< Mutex, DefaultConstructTrait< Mutex >, ThreadSafeInitOnceTrait >::type LazyMutex
 
typedef LazyStaticInstance< RecursiveMutex, DefaultConstructTrait< RecursiveMutex >, ThreadSafeInitOnceTrait >::type LazyRecursiveMutex
 
typedef IN PSTR UserSearchPath
 
typedef IN PSTR IN BOOL fInvadeProcess
 
typedef OUT PSTR SearchPath
 
typedef OUT PSTR IN DWORD SearchPathLength
 
typedef IN HANDLE hFile
 
typedef IN HANDLE IN PSTR ImageName
 
typedef IN HANDLE IN PSTR IN PSTR ModuleName
 
typedef IN HANDLE IN PSTR IN PSTR IN DWORD64 BaseOfDll
 
typedef IN HANDLE IN PSTR IN PSTR IN DWORD64 IN DWORD SizeOfDll
 
typedef HANDLE hProcess
 
typedef HANDLE HANDLE hThread
 
typedef HANDLE HANDLE LPSTACKFRAME64 StackFrame
 
typedef HANDLE HANDLE LPSTACKFRAME64 PVOID ContextRecord
 
typedef HANDLE HANDLE LPSTACKFRAME64 PVOID PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine
 
typedef HANDLE HANDLE LPSTACKFRAME64 PVOID PREAD_PROCESS_MEMORY_ROUTINE64 PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine
 
typedef HANDLE HANDLE LPSTACKFRAME64 PVOID PREAD_PROCESS_MEMORY_ROUTINE64 PFUNCTION_TABLE_ACCESS_ROUTINE64 PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine
 
typedef HANDLE HANDLE LPSTACKFRAME64 PVOID PREAD_PROCESS_MEMORY_ROUTINE64 PFUNCTION_TABLE_ACCESS_ROUTINE64 PGET_MODULE_BASE_ROUTINE64 PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress
 
typedef IN DWORD64 qwAddr
 
typedef IN DWORD64 OUT PDWORD64 pdwDisplacement
 
typedef IN DWORD64 OUT PDWORD64 OUT PIMAGEHLP_SYMBOL64 Symbol
 
typedef IN DWORD64 OUT PDWORD OUT PIMAGEHLP_LINE64 Line64
 
typedef DWORD64 AddrBase
 
typedef DWORD th32ProcessID
 
typedef LPMODULEENTRY32W lpme
 

Enumerations

enum  __tsan_memory_order {
  __tsan_memory_order_relaxed , __tsan_memory_order_consume , __tsan_memory_order_acquire , __tsan_memory_order_release ,
  __tsan_memory_order_acq_rel , __tsan_memory_order_seq_cst
}
 
enum  { ONCE_STATE_UNINITIALIZED = 0 , ONCE_STATE_EXECUTING_FUNCTION = 1 , ONCE_STATE_DONE = 2 }
 
enum  OutputMode { UNKNOWN , CONSOLE , ODS }
 

Functions

Atomic32 NoBarrier_CompareAndSwap (volatile Atomic32 *ptr, Atomic32 old_value, Atomic32 new_value)
 
Atomic32 NoBarrier_AtomicExchange (volatile Atomic32 *ptr, Atomic32 new_value)
 
Atomic32 NoBarrier_AtomicIncrement (volatile Atomic32 *ptr, Atomic32 increment)
 
Atomic32 Barrier_AtomicIncrement (volatile Atomic32 *ptr, Atomic32 increment)
 
Atomic32 Acquire_CompareAndSwap (volatile Atomic32 *ptr, Atomic32 old_value, Atomic32 new_value)
 
Atomic32 Release_CompareAndSwap (volatile Atomic32 *ptr, Atomic32 old_value, Atomic32 new_value)
 
void MemoryBarrier ()
 
void NoBarrier_Store (volatile Atomic8 *ptr, Atomic8 value)
 
void NoBarrier_Store (volatile Atomic32 *ptr, Atomic32 value)
 
void Acquire_Store (volatile Atomic32 *ptr, Atomic32 value)
 
void Release_Store (volatile Atomic32 *ptr, Atomic32 value)
 
Atomic8 NoBarrier_Load (volatile const Atomic8 *ptr)
 
Atomic32 NoBarrier_Load (volatile const Atomic32 *ptr)
 
Atomic32 Acquire_Load (volatile const Atomic32 *ptr)
 
Atomic32 Release_Load (volatile const Atomic32 *ptr)
 
Atomic64 NoBarrier_CompareAndSwap (volatile Atomic64 *ptr, Atomic64 old_value, Atomic64 new_value)
 
Atomic64 NoBarrier_AtomicExchange (volatile Atomic64 *ptr, Atomic64 new_value)
 
Atomic64 NoBarrier_AtomicIncrement (volatile Atomic64 *ptr, Atomic64 increment)
 
Atomic64 Barrier_AtomicIncrement (volatile Atomic64 *ptr, Atomic64 increment)
 
Atomic64 Acquire_CompareAndSwap (volatile Atomic64 *ptr, Atomic64 old_value, Atomic64 new_value)
 
Atomic64 Release_CompareAndSwap (volatile Atomic64 *ptr, Atomic64 old_value, Atomic64 new_value)
 
void NoBarrier_Store (volatile Atomic64 *ptr, Atomic64 value)
 
void Acquire_Store (volatile Atomic64 *ptr, Atomic64 value)
 
void Release_Store (volatile Atomic64 *ptr, Atomic64 value)
 
Atomic64 NoBarrier_Load (volatile const Atomic64 *ptr)
 
Atomic64 Acquire_Load (volatile const Atomic64 *ptr)
 
Atomic64 Release_Load (volatile const Atomic64 *ptr)
 
AtomicWord NoBarrier_CompareAndSwap (volatile AtomicWord *ptr, AtomicWord old_value, AtomicWord new_value)
 
AtomicWord NoBarrier_AtomicExchange (volatile AtomicWord *ptr, AtomicWord new_value)
 
AtomicWord NoBarrier_AtomicIncrement (volatile AtomicWord *ptr, AtomicWord increment)
 
AtomicWord Barrier_AtomicIncrement (volatile AtomicWord *ptr, AtomicWord increment)
 
AtomicWord Acquire_CompareAndSwap (volatile AtomicWord *ptr, AtomicWord old_value, AtomicWord new_value)
 
AtomicWord Release_CompareAndSwap (volatile AtomicWord *ptr, AtomicWord old_value, AtomicWord new_value)
 
void NoBarrier_Store (volatile AtomicWord *ptr, AtomicWord value)
 
void Acquire_Store (volatile AtomicWord *ptr, AtomicWord value)
 
void Release_Store (volatile AtomicWord *ptr, AtomicWord value)
 
AtomicWord NoBarrier_Load (volatile const AtomicWord *ptr)
 
AtomicWord Acquire_Load (volatile const AtomicWord *ptr)
 
AtomicWord Release_Load (volatile const AtomicWord *ptr)
 
__tsan_atomic8 __tsan_atomic8_load (const volatile __tsan_atomic8 *a, __tsan_memory_order mo)
 
__tsan_atomic16 __tsan_atomic16_load (const volatile __tsan_atomic16 *a, __tsan_memory_order mo)
 
__tsan_atomic32 __tsan_atomic32_load (const volatile __tsan_atomic32 *a, __tsan_memory_order mo)
 
__tsan_atomic64 __tsan_atomic64_load (const volatile __tsan_atomic64 *a, __tsan_memory_order mo)
 
__tsan_atomic128 __tsan_atomic128_load (const volatile __tsan_atomic128 *a, __tsan_memory_order mo)
 
void __tsan_atomic8_store (volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo)
 
void __tsan_atomic16_store (volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo)
 
void __tsan_atomic32_store (volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo)
 
void __tsan_atomic64_store (volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo)
 
void __tsan_atomic128_store (volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo)
 
__tsan_atomic8 __tsan_atomic8_exchange (volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo)
 
__tsan_atomic16 __tsan_atomic16_exchange (volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo)
 
__tsan_atomic32 __tsan_atomic32_exchange (volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo)
 
__tsan_atomic64 __tsan_atomic64_exchange (volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo)
 
__tsan_atomic128 __tsan_atomic128_exchange (volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo)
 
__tsan_atomic8 __tsan_atomic8_fetch_add (volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo)
 
__tsan_atomic16 __tsan_atomic16_fetch_add (volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo)
 
__tsan_atomic32 __tsan_atomic32_fetch_add (volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo)
 
__tsan_atomic64 __tsan_atomic64_fetch_add (volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo)
 
__tsan_atomic128 __tsan_atomic128_fetch_add (volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo)
 
__tsan_atomic8 __tsan_atomic8_fetch_and (volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo)
 
__tsan_atomic16 __tsan_atomic16_fetch_and (volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo)
 
__tsan_atomic32 __tsan_atomic32_fetch_and (volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo)
 
__tsan_atomic64 __tsan_atomic64_fetch_and (volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo)
 
__tsan_atomic128 __tsan_atomic128_fetch_and (volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo)
 
__tsan_atomic8 __tsan_atomic8_fetch_or (volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo)
 
__tsan_atomic16 __tsan_atomic16_fetch_or (volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo)
 
__tsan_atomic32 __tsan_atomic32_fetch_or (volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo)
 
__tsan_atomic64 __tsan_atomic64_fetch_or (volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo)
 
__tsan_atomic128 __tsan_atomic128_fetch_or (volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo)
 
__tsan_atomic8 __tsan_atomic8_fetch_xor (volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo)
 
__tsan_atomic16 __tsan_atomic16_fetch_xor (volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo)
 
__tsan_atomic32 __tsan_atomic32_fetch_xor (volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo)
 
__tsan_atomic64 __tsan_atomic64_fetch_xor (volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo)
 
__tsan_atomic128 __tsan_atomic128_fetch_xor (volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo)
 
__tsan_atomic8 __tsan_atomic8_fetch_nand (volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo)
 
__tsan_atomic16 __tsan_atomic16_fetch_nand (volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo)
 
__tsan_atomic32 __tsan_atomic32_fetch_nand (volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo)
 
__tsan_atomic64 __tsan_atomic64_fetch_nand (volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo)
 
__tsan_atomic128 __tsan_atomic128_fetch_nand (volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo)
 
int __tsan_atomic8_compare_exchange_weak (volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo, __tsan_memory_order fail_mo)
 
int __tsan_atomic16_compare_exchange_weak (volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo, __tsan_memory_order fail_mo)
 
int __tsan_atomic32_compare_exchange_weak (volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo, __tsan_memory_order fail_mo)
 
int __tsan_atomic64_compare_exchange_weak (volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo, __tsan_memory_order fail_mo)
 
int __tsan_atomic128_compare_exchange_weak (volatile __tsan_atomic128 *a, __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo, __tsan_memory_order fail_mo)
 
int __tsan_atomic8_compare_exchange_strong (volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo, __tsan_memory_order fail_mo)
 
int __tsan_atomic16_compare_exchange_strong (volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo, __tsan_memory_order fail_mo)
 
int __tsan_atomic32_compare_exchange_strong (volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo, __tsan_memory_order fail_mo)
 
int __tsan_atomic64_compare_exchange_strong (volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo, __tsan_memory_order fail_mo)
 
int __tsan_atomic128_compare_exchange_strong (volatile __tsan_atomic128 *a, __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo, __tsan_memory_order fail_mo)
 
__tsan_atomic8 __tsan_atomic8_compare_exchange_val (volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v, __tsan_memory_order mo, __tsan_memory_order fail_mo)
 
__tsan_atomic16 __tsan_atomic16_compare_exchange_val (volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v, __tsan_memory_order mo, __tsan_memory_order fail_mo)
 
__tsan_atomic32 __tsan_atomic32_compare_exchange_val (volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v, __tsan_memory_order mo, __tsan_memory_order fail_mo)
 
__tsan_atomic64 __tsan_atomic64_compare_exchange_val (volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v, __tsan_memory_order mo, __tsan_memory_order fail_mo)
 
__tsan_atomic128 __tsan_atomic128_compare_exchange_val (volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v, __tsan_memory_order mo, __tsan_memory_order fail_mo)
 
void __tsan_atomic_thread_fence (__tsan_memory_order mo)
 
void __tsan_atomic_signal_fence (__tsan_memory_order mo)
 
Atomic32 Acquire_AtomicExchange (volatile Atomic32 *ptr, Atomic32 new_value)
 
Atomic32 Release_AtomicExchange (volatile Atomic32 *ptr, Atomic32 new_value)
 
Atomic64 Acquire_AtomicExchange (volatile Atomic64 *ptr, Atomic64 new_value)
 
Atomic64 Release_AtomicExchange (volatile Atomic64 *ptr, Atomic64 new_value)
 
 TEST (CPUTest, FeatureImplications)
 
 TEST (CPUTest, RequiredFeatures)
 
template<class T >
std::ostream & operator<< (std::ostream &os, const MagicNumbersForDivision< T > &mag)
 
static M32 s32 (int32_t d)
 
static M64 s64 (int64_t d)
 
static M32 u32 (uint32_t d)
 
static M64 u64 (uint64_t d)
 
 TEST (DivisionByConstant, Signed32)
 
 TEST (DivisionByConstant, Unsigned32)
 
 TEST (DivisionByConstant, Signed64)
 
 TEST (DivisionByConstant, Unsigned64)
 
template<class T >
MagicNumbersForDivision< T > SignedDivisionByConstant (T d)
 
template<class T >
MagicNumbersForDivision< T > UnsignedDivisionByConstant (T d, unsigned leading_zeros)
 
template MagicNumbersForDivision< uint32_t > SignedDivisionByConstant (uint32_t d)
 
template MagicNumbersForDivision< uint64_t > SignedDivisionByConstant (uint64_t d)
 
template MagicNumbersForDivision< uint32_t > UnsignedDivisionByConstant (uint32_t d, unsigned leading_zeros)
 
template MagicNumbersForDivision< uint64_t > UnsignedDivisionByConstant (uint64_t d, unsigned leading_zeros)
 
 TEST (FlagsTest, BasicOperations)
 
 TEST (FlagsTest, NamespaceScope)
 
 TEST (FlagsTest, ClassScope)
 
void DumpBacktrace ()
 
void CallOnceImpl (OnceType *once, PointerArgFunction init_func, void *arg)
 
void CallOnce (OnceType *once, NoArgFunction init_func)
 
template<typename Arg >
void CallOnce (OnceType *once, typename OneArgFunction< Arg * >::type init_func, Arg *arg)
 
 TEST (ConditionVariable, WaitForAfterNofityOnSameThread)
 
 TEST (ConditionVariable, MultipleThreadsWithSeparateConditionVariables)
 
 TEST (ConditionVariable, MultipleThreadsWithSharedSeparateConditionVariables)
 
 TEST (ConditionVariable, LoopIncrement)
 
 TEST (Mutex, LockGuardMutex)
 
 TEST (Mutex, LockGuardRecursiveMutex)
 
 TEST (Mutex, LockGuardLazyMutex)
 
 TEST (Mutex, LockGuardLazyRecursiveMutex)
 
 TEST (Mutex, MultipleMutexes)
 
 TEST (Mutex, MultipleRecursiveMutexes)
 
static void * RandomizedVirtualAlloc (size_t size, int action, int protection)
 
static unsigned StringToLong (char *buffer)
 
static void SetThreadName (const char *name)
 
static void * ThreadEntry (void *arg)
 
static Thread::LocalStorageKey PthreadKeyToLocalKey (pthread_key_t pthread_key)
 
static pthread_key_t LocalKeyToPthreadKey (Thread::LocalStorageKey local_key)
 
 TEST (OS, GetCurrentProcessId)
 
 TEST (Thread, SelfJoin)
 
 TEST_F (ThreadLocalStorageTest, DoTest)
 
static bool HasConsole ()
 
static void VPrintHelper (FILE *stream, const char *format, va_list args)
 
static size_t GetPageSize ()
 
static void * RandomizedVirtualAlloc (size_t size, int action, int protection)
 
typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymInitialize))(IN HANDLE hProcess
 
typedef DWORD (__stdcall *DLL_FUNC_TYPE(SymGetOptions))(VOID)
 
typedef DWORD (__stdcall *DLL_FUNC_TYPE(SymSetOptions))(IN DWORD SymOptions)
 
typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetSearchPath))(IN HANDLE hProcess
 
typedef DWORD64 (__stdcall *DLL_FUNC_TYPE(SymLoadModule64))(IN HANDLE hProcess
 
typedef BOOL (__stdcall *DLL_FUNC_TYPE(StackWalk64))(DWORD MachineType
 
typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetSymFromAddr64))(IN HANDLE hProcess
 
typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetLineFromAddr64))(IN HANDLE hProcess
 
typedef PVOID (__stdcall *DLL_FUNC_TYPE(SymFunctionTableAccess64))(HANDLE hProcess
 
typedef DWORD64 (__stdcall *DLL_FUNC_TYPE(SymGetModuleBase64))(HANDLE hProcess
 
typedef HANDLE (__stdcall *DLL_FUNC_TYPE(CreateToolhelp32Snapshot))(DWORD dwFlags
 
typedef BOOL (__stdcall *DLL_FUNC_TYPE(Module32FirstW))(HANDLE hSnapshot
 
typedef BOOL (__stdcall *DLL_FUNC_TYPE(Module32NextW))(HANDLE hSnapshot
 
static bool LoadDbgHelpAndTlHelp32 ()
 
static std::vector< OS::SharedLibraryAddress > LoadSymbols (HANDLE process_handle)
 
static unsigned int __stdcall ThreadEntry (void *arg)
 
 TEST (Semaphore, ProducerConsumer)
 
 TEST (Semaphore, WaitAndSignal)
 
 TEST (Semaphore, WaitFor)
 
 TEST (TimeDelta, FromAndIn)
 
 TEST (Time, JsTime)
 
 TEST (Time, NowResolution)
 
 TEST (TimeTicks, NowResolution)
 
 TEST (TimeTicks, HighResolutionNowResolution)
 
 TEST (TimeTicks, IsMonotonic)
 
Time operator+ (const TimeDelta &delta, const Time &time)
 
TimeTicks operator+ (const TimeDelta &delta, const TimeTicks &ticks)
 
template<typename Dst , typename Src >
bool IsValueInRangeForNumericType (Src value)
 
template<typename Dst , typename Src >
Dst checked_cast (Src value)
 
template<typename Dst , typename Src >
Dst saturated_cast (Src value)
 
 TEST (SysInfoTest, NumberOfProcessors)
 
 TEST (SysInfoTest, DISABLE_ON_NACL(AmountOfPhysicalMemory))
 
 TEST (SysInfoTest, AmountOfVirtualMemory)
 
 TEST_P (RandomNumberGeneratorTest, NextIntWithMaxValue)
 
 TEST_P (RandomNumberGeneratorTest, NextBooleanReturnsFalseOrTrue)
 
 TEST_P (RandomNumberGeneratorTest, NextDoubleReturnsValueBetween0And1)
 
 INSTANTIATE_TEST_CASE_P (RandomSeeds, RandomNumberGeneratorTest, ::testing::Values(INT_MIN, -1, 0, 1, 42, 100, 1234567890, 987654321, INT_MAX))
 

Variables

struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures
 
static const int kMmapFd = -1
 
static const int kMmapFdOffset = 0
 
static const int kMmapFd = -1
 
static const int kMmapFdOffset = 0
 
static const int kMmapFd = VM_MAKE_TAG(255)
 
static const off_t kMmapFdOffset = 0
 
static const int kMmapFd = -1
 
static const int kMmapFdOffset = 0
 
static LazyInstance< RandomNumberGenerator >::type platform_random_number_generator = LAZY_INSTANCE_INITIALIZER
 
static const pthread_t kNoThread = (pthread_t) 0
 
static const int kMmapFd = -1
 
static const int kMmapFdOffset = 0
 
static const int kMmapFd = -1
 
static const int kMmapFdOffset = 0
 
static OutputMode output_mode = UNKNOWN
 
static LazyInstance< RandomNumberGenerator >::type platform_random_number_generator = LAZY_INSTANCE_INITIALIZER
 
static const HANDLE kNoThread = INVALID_HANDLE_VALUE
 
static LazyStaticInstance< KernelTimestampClock, DefaultConstructTrait< KernelTimestampClock >, ThreadSafeInitOnceTrait >::type kernel_tick_clock
 
static const int kMaxRuns = 12345
 
static LazyMutex entropy_mutex = LAZY_MUTEX_INITIALIZER
 
static RandomNumberGenerator::EntropySource entropy_source = NULL
 

Typedef Documentation

◆ __tsan_atomic128

Definition at line 30 of file atomicops_internals_tsan.h.

◆ __tsan_atomic16

Definition at line 21 of file atomicops_internals_tsan.h.

◆ __tsan_atomic32

Definition at line 22 of file atomicops_internals_tsan.h.

◆ __tsan_atomic64

Definition at line 23 of file atomicops_internals_tsan.h.

◆ __tsan_atomic8

Definition at line 20 of file atomicops_internals_tsan.h.

◆ AddrBase

Definition at line 994 of file platform-win32.cc.

◆ Atomic32

typedef int32_t v8::base::Atomic32

Definition at line 44 of file atomicops.h.

◆ Atomic8

typedef char v8::base::Atomic8

Definition at line 43 of file atomicops.h.

◆ AtomicWord

typedef intptr_t v8::base::AtomicWord

Definition at line 57 of file atomicops.h.

◆ BaseOfDll

Definition at line 969 of file platform-win32.cc.

◆ ContextRecord

typedef HANDLE HANDLE LPSTACKFRAME64 PVOID v8::base::ContextRecord

Definition at line 976 of file platform-win32.cc.

◆ fInvadeProcess

Definition at line 957 of file platform-win32.cc.

◆ FunctionTableAccessRoutine

typedef HANDLE HANDLE LPSTACKFRAME64 PVOID PREAD_PROCESS_MEMORY_ROUTINE64 PFUNCTION_TABLE_ACCESS_ROUTINE64 v8::base::FunctionTableAccessRoutine

Definition at line 978 of file platform-win32.cc.

◆ GetModuleBaseRoutine

typedef HANDLE HANDLE LPSTACKFRAME64 PVOID PREAD_PROCESS_MEMORY_ROUTINE64 PFUNCTION_TABLE_ACCESS_ROUTINE64 PGET_MODULE_BASE_ROUTINE64 v8::base::GetModuleBaseRoutine

Definition at line 979 of file platform-win32.cc.

◆ hFile

Definition at line 966 of file platform-win32.cc.

◆ hProcess

Definition at line 973 of file platform-win32.cc.

◆ hThread

Definition at line 974 of file platform-win32.cc.

◆ ImageName

Definition at line 967 of file platform-win32.cc.

◆ LazyConditionVariable

Definition at line 112 of file condition-variable.h.

◆ LazyMutex

Definition at line 105 of file mutex.h.

◆ LazyRecursiveMutex

Definition at line 186 of file mutex.h.

◆ Line64

typedef IN DWORD64 OUT PDWORD OUT PIMAGEHLP_LINE64 v8::base::Line64

Definition at line 990 of file platform-win32.cc.

◆ lpme

typedef LPMODULEENTRY32W v8::base::lpme

Definition at line 1004 of file platform-win32.cc.

◆ M32

◆ M64

Definition at line 27 of file division-by-constant-unittest.cc.

◆ ModuleName

typedef IN HANDLE IN PSTR IN PSTR v8::base::ModuleName

Definition at line 968 of file platform-win32.cc.

◆ NoArgFunction

typedef void(* v8::base::NoArgFunction) ()

Definition at line 72 of file once.h.

◆ OnceType

Definition at line 60 of file once.h.

◆ pdwDisplacement

typedef IN DWORD64 OUT PDWORD v8::base::pdwDisplacement

Definition at line 984 of file platform-win32.cc.

◆ PointerArgFunction

typedef void(* v8::base::PointerArgFunction) (void *arg)

Definition at line 73 of file once.h.

◆ qwAddr

Definition at line 983 of file platform-win32.cc.

◆ ReadMemoryRoutine

typedef HANDLE HANDLE LPSTACKFRAME64 PVOID PREAD_PROCESS_MEMORY_ROUTINE64 v8::base::ReadMemoryRoutine

Definition at line 977 of file platform-win32.cc.

◆ SearchPath

typedef OUT PSTR v8::base::SearchPath

Definition at line 962 of file platform-win32.cc.

◆ SearchPathLength

Definition at line 963 of file platform-win32.cc.

◆ SizeOfDll

Definition at line 970 of file platform-win32.cc.

◆ StackFrame

typedef HANDLE HANDLE LPSTACKFRAME64 v8::base::StackFrame

Definition at line 975 of file platform-win32.cc.

◆ Symbol

typedef IN DWORD64 OUT PDWORD64 OUT PIMAGEHLP_SYMBOL64 v8::base::Symbol

Definition at line 985 of file platform-win32.cc.

◆ th32ProcessID

Definition at line 1002 of file platform-win32.cc.

◆ TranslateAddress

typedef HANDLE HANDLE LPSTACKFRAME64 PVOID PREAD_PROCESS_MEMORY_ROUTINE64 PFUNCTION_TABLE_ACCESS_ROUTINE64 PGET_MODULE_BASE_ROUTINE64 PTRANSLATE_ADDRESS_ROUTINE64 v8::base::TranslateAddress

Definition at line 980 of file platform-win32.cc.

◆ UserSearchPath

Definition at line 956 of file platform-win32.cc.

Enumeration Type Documentation

◆ anonymous enum

anonymous enum
Enumerator
ONCE_STATE_UNINITIALIZED 
ONCE_STATE_EXECUTING_FUNCTION 
ONCE_STATE_DONE 

Definition at line 66 of file once.h.

66  {
69  ONCE_STATE_DONE = 2
70 };
@ ONCE_STATE_UNINITIALIZED
Definition: once.h:67
@ ONCE_STATE_DONE
Definition: once.h:69
@ ONCE_STATE_EXECUTING_FUNCTION
Definition: once.h:68

◆ __tsan_memory_order

Enumerator
__tsan_memory_order_relaxed 
__tsan_memory_order_consume 
__tsan_memory_order_acquire 
__tsan_memory_order_release 
__tsan_memory_order_acq_rel 
__tsan_memory_order_seq_cst 

Definition at line 34 of file atomicops_internals_tsan.h.

◆ OutputMode

Enumerator
UNKNOWN 
CONSOLE 
ODS 

Definition at line 533 of file platform-win32.cc.

533  {
534  UNKNOWN, // Output method has not yet been determined.
535  CONSOLE, // Output is written to stdout.
536  ODS // Output is written to debug facility.
537 };

Function Documentation

◆ __tsan_atomic128_compare_exchange_strong()

int v8::base::__tsan_atomic128_compare_exchange_strong ( volatile __tsan_atomic128 * a,
__tsan_atomic128 * c,
__tsan_atomic128  v,
__tsan_memory_order  mo,
__tsan_memory_order  fail_mo
)

◆ __tsan_atomic128_compare_exchange_val()

__tsan_atomic128 v8::base::__tsan_atomic128_compare_exchange_val ( volatile __tsan_atomic128 * a,
__tsan_atomic128  c,
__tsan_atomic128  v,
__tsan_memory_order  mo,
__tsan_memory_order  fail_mo
)

◆ __tsan_atomic128_compare_exchange_weak()

int v8::base::__tsan_atomic128_compare_exchange_weak ( volatile __tsan_atomic128 * a,
__tsan_atomic128 * c,
__tsan_atomic128  v,
__tsan_memory_order  mo,
__tsan_memory_order  fail_mo
)

◆ __tsan_atomic128_exchange()

__tsan_atomic128 v8::base::__tsan_atomic128_exchange ( volatile __tsan_atomic128 * a,
__tsan_atomic128  v,
__tsan_memory_order  mo
)

◆ __tsan_atomic128_fetch_add()

__tsan_atomic128 v8::base::__tsan_atomic128_fetch_add ( volatile __tsan_atomic128 * a,
__tsan_atomic128  v,
__tsan_memory_order  mo
)

◆ __tsan_atomic128_fetch_and()

__tsan_atomic128 v8::base::__tsan_atomic128_fetch_and ( volatile __tsan_atomic128 * a,
__tsan_atomic128  v,
__tsan_memory_order  mo
)

◆ __tsan_atomic128_fetch_nand()

__tsan_atomic128 v8::base::__tsan_atomic128_fetch_nand ( volatile __tsan_atomic128 * a,
__tsan_atomic128  v,
__tsan_memory_order  mo
)

◆ __tsan_atomic128_fetch_or()

__tsan_atomic128 v8::base::__tsan_atomic128_fetch_or ( volatile __tsan_atomic128 * a,
__tsan_atomic128  v,
__tsan_memory_order  mo
)

◆ __tsan_atomic128_fetch_xor()

__tsan_atomic128 v8::base::__tsan_atomic128_fetch_xor ( volatile __tsan_atomic128 * a,
__tsan_atomic128  v,
__tsan_memory_order  mo
)

◆ __tsan_atomic128_load()

__tsan_atomic128 v8::base::__tsan_atomic128_load ( const volatile __tsan_atomic128 * a,
__tsan_memory_order  mo
)

◆ __tsan_atomic128_store()

void v8::base::__tsan_atomic128_store ( volatile __tsan_atomic128 * a,
__tsan_atomic128  v,
__tsan_memory_order  mo
)

◆ __tsan_atomic16_compare_exchange_strong()

int v8::base::__tsan_atomic16_compare_exchange_strong ( volatile __tsan_atomic16 * a,
__tsan_atomic16 * c,
__tsan_atomic16  v,
__tsan_memory_order  mo,
__tsan_memory_order  fail_mo
)

◆ __tsan_atomic16_compare_exchange_val()

__tsan_atomic16 v8::base::__tsan_atomic16_compare_exchange_val ( volatile __tsan_atomic16 * a,
__tsan_atomic16  c,
__tsan_atomic16  v,
__tsan_memory_order  mo,
__tsan_memory_order  fail_mo
)

◆ __tsan_atomic16_compare_exchange_weak()

int v8::base::__tsan_atomic16_compare_exchange_weak ( volatile __tsan_atomic16 * a,
__tsan_atomic16 * c,
__tsan_atomic16  v,
__tsan_memory_order  mo,
__tsan_memory_order  fail_mo
)

◆ __tsan_atomic16_exchange()

__tsan_atomic16 v8::base::__tsan_atomic16_exchange ( volatile __tsan_atomic16 * a,
__tsan_atomic16  v,
__tsan_memory_order  mo
)

◆ __tsan_atomic16_fetch_add()

__tsan_atomic16 v8::base::__tsan_atomic16_fetch_add ( volatile __tsan_atomic16 * a,
__tsan_atomic16  v,
__tsan_memory_order  mo
)

◆ __tsan_atomic16_fetch_and()

__tsan_atomic16 v8::base::__tsan_atomic16_fetch_and ( volatile __tsan_atomic16 * a,
__tsan_atomic16  v,
__tsan_memory_order  mo
)

◆ __tsan_atomic16_fetch_nand()

__tsan_atomic16 v8::base::__tsan_atomic16_fetch_nand ( volatile __tsan_atomic16 * a,
__tsan_atomic16  v,
__tsan_memory_order  mo
)

◆ __tsan_atomic16_fetch_or()

__tsan_atomic16 v8::base::__tsan_atomic16_fetch_or ( volatile __tsan_atomic16 * a,
__tsan_atomic16  v,
__tsan_memory_order  mo
)

◆ __tsan_atomic16_fetch_xor()

__tsan_atomic16 v8::base::__tsan_atomic16_fetch_xor ( volatile __tsan_atomic16 * a,
__tsan_atomic16  v,
__tsan_memory_order  mo
)

◆ __tsan_atomic16_load()

__tsan_atomic16 v8::base::__tsan_atomic16_load ( const volatile __tsan_atomic16 * a,
__tsan_memory_order  mo
)

◆ __tsan_atomic16_store()

void v8::base::__tsan_atomic16_store ( volatile __tsan_atomic16 * a,
__tsan_atomic16  v,
__tsan_memory_order  mo
)

◆ __tsan_atomic32_compare_exchange_strong()

int v8::base::__tsan_atomic32_compare_exchange_strong ( volatile __tsan_atomic32 * a,
__tsan_atomic32 * c,
__tsan_atomic32  v,
__tsan_memory_order  mo,
__tsan_memory_order  fail_mo
)

◆ __tsan_atomic32_compare_exchange_val()

__tsan_atomic32 v8::base::__tsan_atomic32_compare_exchange_val ( volatile __tsan_atomic32 * a,
__tsan_atomic32  c,
__tsan_atomic32  v,
__tsan_memory_order  mo,
__tsan_memory_order  fail_mo
)

◆ __tsan_atomic32_compare_exchange_weak()

int v8::base::__tsan_atomic32_compare_exchange_weak ( volatile __tsan_atomic32 * a,
__tsan_atomic32 * c,
__tsan_atomic32  v,
__tsan_memory_order  mo,
__tsan_memory_order  fail_mo
)

◆ __tsan_atomic32_exchange()

__tsan_atomic32 v8::base::__tsan_atomic32_exchange ( volatile __tsan_atomic32 * a,
__tsan_atomic32  v,
__tsan_memory_order  mo
)

Referenced by Acquire_AtomicExchange(), and Release_AtomicExchange().

+ Here is the caller graph for this function:

◆ __tsan_atomic32_fetch_add()

__tsan_atomic32 v8::base::__tsan_atomic32_fetch_add ( volatile __tsan_atomic32 * a,
__tsan_atomic32  v,
__tsan_memory_order  mo
)

◆ __tsan_atomic32_fetch_and()

__tsan_atomic32 v8::base::__tsan_atomic32_fetch_and ( volatile __tsan_atomic32 * a,
__tsan_atomic32  v,
__tsan_memory_order  mo
)

◆ __tsan_atomic32_fetch_nand()

__tsan_atomic32 v8::base::__tsan_atomic32_fetch_nand ( volatile __tsan_atomic32 * a,
__tsan_atomic32  v,
__tsan_memory_order  mo
)

◆ __tsan_atomic32_fetch_or()

__tsan_atomic32 v8::base::__tsan_atomic32_fetch_or ( volatile __tsan_atomic32 * a,
__tsan_atomic32  v,
__tsan_memory_order  mo
)

◆ __tsan_atomic32_fetch_xor()

__tsan_atomic32 v8::base::__tsan_atomic32_fetch_xor ( volatile __tsan_atomic32 * a,
__tsan_atomic32  v,
__tsan_memory_order  mo
)

◆ __tsan_atomic32_load()

__tsan_atomic32 v8::base::__tsan_atomic32_load ( const volatile __tsan_atomic32 * a,
__tsan_memory_order  mo
)

◆ __tsan_atomic32_store()

void v8::base::__tsan_atomic32_store ( volatile __tsan_atomic32 * a,
__tsan_atomic32  v,
__tsan_memory_order  mo
)

◆ __tsan_atomic64_compare_exchange_strong()

int v8::base::__tsan_atomic64_compare_exchange_strong ( volatile __tsan_atomic64 a,
__tsan_atomic64 c,
__tsan_atomic64  v,
__tsan_memory_order  mo,
__tsan_memory_order  fail_mo 
)

◆ __tsan_atomic64_compare_exchange_val()

__tsan_atomic64 v8::base::__tsan_atomic64_compare_exchange_val ( volatile __tsan_atomic64 a,
__tsan_atomic64  c,
__tsan_atomic64  v,
__tsan_memory_order  mo,
__tsan_memory_order  fail_mo 
)

◆ __tsan_atomic64_compare_exchange_weak()

int v8::base::__tsan_atomic64_compare_exchange_weak ( volatile __tsan_atomic64 *  a,
__tsan_atomic64 *  c,
__tsan_atomic64  v,
__tsan_memory_order  mo,
__tsan_memory_order  fail_mo 
)

◆ __tsan_atomic64_exchange()

__tsan_atomic64 v8::base::__tsan_atomic64_exchange ( volatile __tsan_atomic64 *  a,
__tsan_atomic64  v,
__tsan_memory_order  mo 
)

Referenced by Acquire_AtomicExchange(), and Release_AtomicExchange().

+ Here is the caller graph for this function:

◆ __tsan_atomic64_fetch_add()

__tsan_atomic64 v8::base::__tsan_atomic64_fetch_add ( volatile __tsan_atomic64 *  a,
__tsan_atomic64  v,
__tsan_memory_order  mo 
)

◆ __tsan_atomic64_fetch_and()

__tsan_atomic64 v8::base::__tsan_atomic64_fetch_and ( volatile __tsan_atomic64 *  a,
__tsan_atomic64  v,
__tsan_memory_order  mo 
)

◆ __tsan_atomic64_fetch_nand()

__tsan_atomic64 v8::base::__tsan_atomic64_fetch_nand ( volatile __tsan_atomic64 *  a,
__tsan_atomic64  v,
__tsan_memory_order  mo 
)

◆ __tsan_atomic64_fetch_or()

__tsan_atomic64 v8::base::__tsan_atomic64_fetch_or ( volatile __tsan_atomic64 *  a,
__tsan_atomic64  v,
__tsan_memory_order  mo 
)

◆ __tsan_atomic64_fetch_xor()

__tsan_atomic64 v8::base::__tsan_atomic64_fetch_xor ( volatile __tsan_atomic64 *  a,
__tsan_atomic64  v,
__tsan_memory_order  mo 
)

◆ __tsan_atomic64_load()

__tsan_atomic64 v8::base::__tsan_atomic64_load ( const volatile __tsan_atomic64 *  a,
__tsan_memory_order  mo 
)

◆ __tsan_atomic64_store()

void v8::base::__tsan_atomic64_store ( volatile __tsan_atomic64 *  a,
__tsan_atomic64  v,
__tsan_memory_order  mo 
)

◆ __tsan_atomic8_compare_exchange_strong()

int v8::base::__tsan_atomic8_compare_exchange_strong ( volatile __tsan_atomic8 *  a,
__tsan_atomic8 *  c,
__tsan_atomic8  v,
__tsan_memory_order  mo,
__tsan_memory_order  fail_mo 
)

◆ __tsan_atomic8_compare_exchange_val()

__tsan_atomic8 v8::base::__tsan_atomic8_compare_exchange_val ( volatile __tsan_atomic8 *  a,
__tsan_atomic8  c,
__tsan_atomic8  v,
__tsan_memory_order  mo,
__tsan_memory_order  fail_mo 
)

◆ __tsan_atomic8_compare_exchange_weak()

int v8::base::__tsan_atomic8_compare_exchange_weak ( volatile __tsan_atomic8 *  a,
__tsan_atomic8 *  c,
__tsan_atomic8  v,
__tsan_memory_order  mo,
__tsan_memory_order  fail_mo 
)

◆ __tsan_atomic8_exchange()

__tsan_atomic8 v8::base::__tsan_atomic8_exchange ( volatile __tsan_atomic8 *  a,
__tsan_atomic8  v,
__tsan_memory_order  mo 
)

◆ __tsan_atomic8_fetch_add()

__tsan_atomic8 v8::base::__tsan_atomic8_fetch_add ( volatile __tsan_atomic8 *  a,
__tsan_atomic8  v,
__tsan_memory_order  mo 
)

◆ __tsan_atomic8_fetch_and()

__tsan_atomic8 v8::base::__tsan_atomic8_fetch_and ( volatile __tsan_atomic8 *  a,
__tsan_atomic8  v,
__tsan_memory_order  mo 
)

◆ __tsan_atomic8_fetch_nand()

__tsan_atomic8 v8::base::__tsan_atomic8_fetch_nand ( volatile __tsan_atomic8 *  a,
__tsan_atomic8  v,
__tsan_memory_order  mo 
)

◆ __tsan_atomic8_fetch_or()

__tsan_atomic8 v8::base::__tsan_atomic8_fetch_or ( volatile __tsan_atomic8 *  a,
__tsan_atomic8  v,
__tsan_memory_order  mo 
)

◆ __tsan_atomic8_fetch_xor()

__tsan_atomic8 v8::base::__tsan_atomic8_fetch_xor ( volatile __tsan_atomic8 *  a,
__tsan_atomic8  v,
__tsan_memory_order  mo 
)

◆ __tsan_atomic8_load()

__tsan_atomic8 v8::base::__tsan_atomic8_load ( const volatile __tsan_atomic8 *  a,
__tsan_memory_order  mo 
)

◆ __tsan_atomic8_store()

void v8::base::__tsan_atomic8_store ( volatile __tsan_atomic8 *  a,
__tsan_atomic8  v,
__tsan_memory_order  mo 
)

◆ __tsan_atomic_signal_fence()

void v8::base::__tsan_atomic_signal_fence ( __tsan_memory_order  mo)

◆ __tsan_atomic_thread_fence()

void v8::base::__tsan_atomic_thread_fence ( __tsan_memory_order  mo)

◆ Acquire_AtomicExchange() [1/2]

Atomic32 v8::base::Acquire_AtomicExchange ( volatile Atomic32 *  ptr,
Atomic32  new_value 
)
inline

Definition at line 200 of file atomicops_internals_tsan.h.

201  {
202  return __tsan_atomic32_exchange(ptr, new_value,
203  __tsan_memory_order_acquire);
204 }
__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo)

References __tsan_atomic32_exchange(), and __tsan_memory_order_acquire.

+ Here is the call graph for this function:

◆ Acquire_AtomicExchange() [2/2]

Atomic64 v8::base::Acquire_AtomicExchange ( volatile Atomic64 *  ptr,
Atomic64  new_value 
)
inline

Definition at line 290 of file atomicops_internals_tsan.h.

291  {
292  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
293 }
__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo)

References __tsan_atomic64_exchange(), and __tsan_memory_order_acquire.

+ Here is the call graph for this function:

◆ Acquire_CompareAndSwap() [1/3]

Atomic32 v8::base::Acquire_CompareAndSwap ( volatile Atomic32 *  ptr,
Atomic32  old_value,
Atomic32  new_value 
)
inline

Definition at line 102 of file atomicops_internals_arm64_gcc.h.

104  {
105  Atomic32 prev;
106 
107  prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
108  MemoryBarrier();
109 
110  return prev;
111 }
int32_t Atomic32
Definition: atomicops.h:44
Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr, Atomic64 old_value, Atomic64 new_value)

References MemoryBarrier(), and NoBarrier_CompareAndSwap().

Referenced by Acquire_CompareAndSwap(), CallOnceImpl(), and v8::internal::MemoryChunk::TryParallelSweeping().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ Acquire_CompareAndSwap() [2/3]

Atomic64 v8::base::Acquire_CompareAndSwap ( volatile Atomic64 *  ptr,
Atomic64  old_value,
Atomic64  new_value 
)
inline

Definition at line 252 of file atomicops_internals_arm64_gcc.h.

254  {
255  Atomic64 prev;
256 
257  prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
258  MemoryBarrier();
259 
260  return prev;
261 }

References MemoryBarrier(), and NoBarrier_CompareAndSwap().

+ Here is the call graph for this function:

◆ Acquire_CompareAndSwap() [3/3]

AtomicWord v8::base::Acquire_CompareAndSwap ( volatile AtomicWord *  ptr,
AtomicWord  old_value,
AtomicWord  new_value 
)
inline

Definition at line 51 of file atomicops_internals_atomicword_compat.h.

53  {
54  return Acquire_CompareAndSwap(
55  reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
56 }
Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr, Atomic32 old_value, Atomic32 new_value)

References Acquire_CompareAndSwap().

+ Here is the call graph for this function:

◆ Acquire_Load() [1/3]

Atomic32 v8::base::Acquire_Load ( volatile const Atomic32 *  ptr)
inline

Definition at line 154 of file atomicops_internals_arm64_gcc.h.

154  {
155  Atomic32 value;
156 
157  __asm__ __volatile__ ( // NOLINT
158  "ldar %w[value], %[ptr] \n\t"
159  : [value]"=r" (value)
160  : [ptr]"Q" (*ptr)
161  : "memory"
162  ); // NOLINT
163 
164  return value;
165 }

Referenced by Acquire_Load(), CallOnce(), CallOnceImpl(), v8::internal::MemoryChunk::next_chunk(), v8::internal::MemoryChunk::parallel_sweeping(), v8::internal::SamplingCircularQueue< T, Length >::Peek(), v8::internal::MemoryChunk::prev_chunk(), v8::internal::SweeperThread::Run(), v8::internal::OptimizingCompilerThread::Run(), and v8::internal::SamplingCircularQueue< T, Length >::StartEnqueue().

+ Here is the caller graph for this function:

◆ Acquire_Load() [2/3]

Atomic64 v8::base::Acquire_Load ( volatile const Atomic64 *  ptr)
inline

Definition at line 296 of file atomicops_internals_arm64_gcc.h.

296  {
297  Atomic64 value;
298 
299  __asm__ __volatile__ ( // NOLINT
300  "ldar %x[value], %[ptr] \n\t"
301  : [value]"=r" (value)
302  : [ptr]"Q" (*ptr)
303  : "memory"
304  ); // NOLINT
305 
306  return value;
307 }

◆ Acquire_Load() [3/3]

AtomicWord v8::base::Acquire_Load ( volatile const AtomicWord *  ptr)
inline

Definition at line 85 of file atomicops_internals_atomicword_compat.h.

85  {
86  return Acquire_Load(
87  reinterpret_cast<volatile const Atomic32*>(ptr));
88 }
Atomic32 Acquire_Load(volatile const Atomic32 *ptr)

References Acquire_Load().

+ Here is the call graph for this function:

◆ Acquire_Store() [1/3]

void v8::base::Acquire_Store ( volatile Atomic32 *  ptr,
Atomic32  value 
)
inline

Definition at line 132 of file atomicops_internals_arm64_gcc.h.

132  {
133  *ptr = value;
134  MemoryBarrier();
135 }

References MemoryBarrier().

Referenced by Acquire_Store().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ Acquire_Store() [2/3]

void v8::base::Acquire_Store ( volatile Atomic64 *  ptr,
Atomic64  value 
)
inline

Definition at line 278 of file atomicops_internals_arm64_gcc.h.

278  {
279  *ptr = value;
280  MemoryBarrier();
281 }

References MemoryBarrier().

+ Here is the call graph for this function:

◆ Acquire_Store() [3/3]

void v8::base::Acquire_Store ( volatile AtomicWord *  ptr,
AtomicWord  value 
)
inline

Definition at line 70 of file atomicops_internals_atomicword_compat.h.

70  {
71  Acquire_Store(
72  reinterpret_cast<volatile Atomic32*>(ptr), value);
73 }
void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value)

References Acquire_Store().

+ Here is the call graph for this function:

◆ Barrier_AtomicIncrement() [1/3]

Atomic32 v8::base::Barrier_AtomicIncrement ( volatile Atomic32 *  ptr,
Atomic32  increment 
)
inline

Definition at line 91 of file atomicops_internals_arm64_gcc.h.

92  {
93  Atomic32 result;
94 
95  MemoryBarrier();
96  result = NoBarrier_AtomicIncrement(ptr, increment);
97  MemoryBarrier();
98 
99  return result;
100 }
Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr, Atomic64 increment)

References MemoryBarrier(), and NoBarrier_AtomicIncrement().

Referenced by Barrier_AtomicIncrement().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ Barrier_AtomicIncrement() [2/3]

Atomic64 v8::base::Barrier_AtomicIncrement ( volatile Atomic64 *  ptr,
Atomic64  increment 
)
inline

Definition at line 241 of file atomicops_internals_arm64_gcc.h.

242  {
243  Atomic64 result;
244 
245  MemoryBarrier();
246  result = NoBarrier_AtomicIncrement(ptr, increment);
247  MemoryBarrier();
248 
249  return result;
250 }

References MemoryBarrier(), and NoBarrier_AtomicIncrement().

+ Here is the call graph for this function:

◆ Barrier_AtomicIncrement() [3/3]

AtomicWord v8::base::Barrier_AtomicIncrement ( volatile AtomicWord *  ptr,
AtomicWord  increment 
)
inline

Definition at line 45 of file atomicops_internals_atomicword_compat.h.

46  {
47  return Barrier_AtomicIncrement(
48  reinterpret_cast<volatile Atomic32*>(ptr), increment);
49 }
AtomicWord Barrier_AtomicIncrement(volatile AtomicWord *ptr, AtomicWord increment)

References Barrier_AtomicIncrement().

+ Here is the call graph for this function:

◆ BOOL() [1/7]

typedef v8::base::BOOL ( __stdcall *  DLL_FUNC_TYPEModule32FirstW)

◆ BOOL() [2/7]

typedef v8::base::BOOL ( __stdcall *  DLL_FUNC_TYPEModule32NextW)

◆ BOOL() [3/7]

typedef v8::base::BOOL ( __stdcall *  DLL_FUNC_TYPEStackWalk64)

◆ BOOL() [4/7]

typedef v8::base::BOOL ( __stdcall *  DLL_FUNC_TYPESymGetLineFromAddr64)

◆ BOOL() [5/7]

typedef v8::base::BOOL ( __stdcall *  DLL_FUNC_TYPESymGetSearchPath)

◆ BOOL() [6/7]

typedef v8::base::BOOL ( __stdcall *  DLL_FUNC_TYPESymGetSymFromAddr64)

◆ BOOL() [7/7]

typedef v8::base::BOOL ( __stdcall *  DLL_FUNC_TYPESymInitialize)

Referenced by LoadSymbols().

+ Here is the caller graph for this function:

◆ CallOnce() [1/2]

void v8::base::CallOnce ( OnceType once,
NoArgFunction  init_func 
)
inline

Definition at line 82 of file once.h.

82  {
83  if (Acquire_Load(once) != ONCE_STATE_DONE) {
84  CallOnceImpl(once, reinterpret_cast<PointerArgFunction>(init_func), NULL);
85  }
86 }
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be NULL
void(* PointerArgFunction)(void *arg)
Definition: once.h:73
void CallOnceImpl(OnceType *once, PointerArgFunction init_func, void *arg)
Definition: once.cc:18

References Acquire_Load(), CallOnceImpl(), NULL, and ONCE_STATE_DONE.

Referenced by v8::internal::BuiltinFunctionTable::functions(), v8::base::ThreadSafeInitOnceTrait::Init(), v8::internal::V8::InitializeOncePerProcess(), and v8::internal::Heap::SetUp().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ CallOnce() [2/2]

template<typename Arg >
void v8::base::CallOnce ( OnceType once,
typename OneArgFunction< Arg * >::type  init_func,
Arg *  arg 
)
inline

Definition at line 90 of file once.h.

91  {
92  if (Acquire_Load(once) != ONCE_STATE_DONE) {
93  CallOnceImpl(once, reinterpret_cast<PointerArgFunction>(init_func),
94  static_cast<void*>(arg));
95  }
96 }

References Acquire_Load(), CallOnceImpl(), and ONCE_STATE_DONE.

+ Here is the call graph for this function:

◆ CallOnceImpl()

void v8::base::CallOnceImpl ( OnceType once,
PointerArgFunction  init_func,
void *  arg 
)

Definition at line 18 of file once.cc.

18  {
19  AtomicWord state = Acquire_Load(once);
20  // Fast path. The provided function was already executed.
21  if (state == ONCE_STATE_DONE) {
22  return;
23  }
24 
25  // The function execution did not complete yet. The once object can be in one
26  // of the two following states:
27  // - UNINITIALIZED: We are the first thread calling this function.
28  // - EXECUTING_FUNCTION: Another thread is already executing the function.
29  //
30  // First, try to change the state from UNINITIALIZED to EXECUTING_FUNCTION
31  // atomically.
32  state = Acquire_CompareAndSwap(
33  once, ONCE_STATE_UNINITIALIZED, ONCE_STATE_EXECUTING_FUNCTION);
34  if (state == ONCE_STATE_UNINITIALIZED) {
35  // We are the first thread to call this function, so we have to call the
36  // function.
37  init_func(arg);
38  Release_Store(once, ONCE_STATE_DONE);
39  } else {
40  // Another thread has already started executing the function. We need to
41  // wait until it completes the initialization.
42  while (state == ONCE_STATE_EXECUTING_FUNCTION) {
43 #ifdef _WIN32
44  ::Sleep(0);
45 #else
46  sched_yield();
47 #endif
48  state = Acquire_Load(once);
49  }
50  }
51 }
intptr_t AtomicWord
Definition: atomicops.h:57
void Release_Store(volatile Atomic32 *ptr, Atomic32 value)

References Acquire_CompareAndSwap(), Acquire_Load(), ONCE_STATE_DONE, ONCE_STATE_EXECUTING_FUNCTION, ONCE_STATE_UNINITIALIZED, and Release_Store().

Referenced by CallOnce().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ checked_cast()

template<typename Dst , typename Src >
Dst v8::base::checked_cast ( Src  value)
inline

Definition at line 30 of file safe_conversions.h.

30  {
31  CHECK(IsValueInRangeForNumericType<Dst>(value));
32  return static_cast<Dst>(value);
33 }
#define CHECK(condition)
Definition: logging.h:36

References CHECK.

◆ DumpBacktrace()

void v8::base::DumpBacktrace ( )

Definition at line 22 of file logging.cc.

22  {
23 #if V8_LIBC_GLIBC || V8_OS_BSD
24  void* trace[100];
25  int size = backtrace(trace, arraysize(trace));
26  char** symbols = backtrace_symbols(trace, size);
27  OS::PrintError("\n==== C stack trace ===============================\n\n");
28  if (size == 0) {
29  OS::PrintError("(empty)\n");
30  } else if (symbols == NULL) {
31  OS::PrintError("(no symbols)\n");
32  } else {
33  for (int i = 1; i < size; ++i) {
34  OS::PrintError("%2d: ", i);
35  char mangled[201];
36  if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) { // NOLINT
37  int status;
38  size_t length;
39  char* demangled = abi::__cxa_demangle(mangled, NULL, &length, &status);
40  OS::PrintError("%s\n", demangled != NULL ? demangled : mangled);
41  free(demangled);
42  } else {
43  OS::PrintError("??\n");
44  }
45  }
46  }
47  free(symbols);
48 #elif V8_OS_QNX
49  char out[1024];
50  bt_accessor_t acc;
51  bt_memmap_t memmap;
52  bt_init_accessor(&acc, BT_SELF);
53  bt_load_memmap(&acc, &memmap);
54  bt_sprn_memmap(&memmap, out, sizeof(out));
55  OS::PrintError(out);
56  bt_addr_t trace[100];
57  int size = bt_get_backtrace(&acc, trace, arraysize(trace));
58  OS::PrintError("\n==== C stack trace ===============================\n\n");
59  if (size == 0) {
60  OS::PrintError("(empty)\n");
61  } else {
62  bt_sprnf_addrs(&memmap, trace, size, const_cast<char*>("%a\n"),
63  out, sizeof(out), NULL);
64  OS::PrintError(out);
65  }
66  bt_unload_memmap(&memmap);
67  bt_release_accessor(&acc);
68 #endif // V8_LIBC_GLIBC || V8_OS_BSD
69 }
enable harmony numeric enable harmony object literal extensions Optimize object size
#define arraysize(array)
Definition: macros.h:86

References arraysize, NULL, v8::base::OS::PrintError(), and size.

Referenced by V8_Fatal().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ DWORD() [1/2]

typedef v8::base::DWORD ( __stdcall *  DLL_FUNC_TYPESymGetOptions)

Referenced by v8::base::OS::Guard(), loadiJIT_Funcs(), LoadSymbols(), v8::base::OS::ProtectCode(), and v8::base::Win32Time::SetToCurrentTime().

+ Here is the caller graph for this function:

◆ DWORD() [2/2]

typedef v8::base::DWORD ( __stdcall *  DLL_FUNC_TYPESymSetOptions)

◆ DWORD64() [1/2]

typedef v8::base::DWORD64 ( __stdcall *  DLL_FUNC_TYPESymGetModuleBase64)

◆ DWORD64() [2/2]

typedef v8::base::DWORD64 ( __stdcall *  DLL_FUNC_TYPESymLoadModule64)

Referenced by LoadSymbols().

+ Here is the caller graph for this function:

◆ GetPageSize()

static size_t v8::base::GetPageSize ( )
static

Definition at line 699 of file platform-win32.cc.

699  {
700  static size_t page_size = 0;
701  if (page_size == 0) {
702  SYSTEM_INFO info;
703  GetSystemInfo(&info);
704  page_size = base::bits::RoundUpToPowerOfTwo32(info.dwPageSize);
705  }
706  return page_size;
707 }
uint32_t RoundUpToPowerOfTwo32(uint32_t value)
Definition: bits.cc:12

References v8::base::bits::RoundUpToPowerOfTwo32().

+ Here is the call graph for this function:

◆ HANDLE()

typedef v8::base::HANDLE ( __stdcall *  DLL_FUNC_TYPECreateToolhelp32Snapshot)

Referenced by LoadSymbols().

+ Here is the caller graph for this function:

◆ HasConsole()

static bool v8::base::HasConsole ( )
static

Definition at line 543 of file platform-win32.cc.

543  {
544  // Only check the first time. Eventual race conditions are not a problem,
545  // because all threads will eventually determine the same mode.
546  if (output_mode == UNKNOWN) {
547  // We cannot just check that the standard output is attached to a console
548  // because this would fail if output is redirected to a file. Therefore we
549  // say that a process does not have an output console if either the
550  // standard output handle is invalid or its file type is unknown.
551  if (GetStdHandle(STD_OUTPUT_HANDLE) != INVALID_HANDLE_VALUE &&
552  GetFileType(GetStdHandle(STD_OUTPUT_HANDLE)) != FILE_TYPE_UNKNOWN)
553  output_mode = CONSOLE;
554  else
555  output_mode = ODS;
556  }
557  return output_mode == CONSOLE;
558 }
static OutputMode output_mode

References CONSOLE, ODS, output_mode, and UNKNOWN.

Referenced by VPrintHelper().

+ Here is the caller graph for this function:

◆ INSTANTIATE_TEST_CASE_P()

v8::base::INSTANTIATE_TEST_CASE_P ( RandomSeeds  ,
RandomNumberGeneratorTest  ,
::testing::Values(INT_MIN, -1, 0, 1, 42, 100, 1234567890, 987654321, INT_MAX)   
)

◆ IsValueInRangeForNumericType()

template<typename Dst , typename Src >
bool v8::base::IsValueInRangeForNumericType ( Src  value)
inline

Definition at line 21 of file safe_conversions.h.

21  {
22  return internal::DstRangeRelationToSrcRange<Dst>(value) ==
23  internal::RANGE_VALID;
24 }

References v8::base::internal::RANGE_VALID.

◆ LoadDbgHelpAndTlHelp32()

static bool v8::base::LoadDbgHelpAndTlHelp32 ( )
static

Definition at line 1020 of file platform-win32.cc.

1020  {
1021  static bool dbghelp_loaded = false;
1022 
1023  if (dbghelp_loaded) return true;
1024 
1025  HMODULE module;
1026 
1027  // Load functions from the dbghelp.dll module.
1028  module = LoadLibrary(TEXT("dbghelp.dll"));
1029  if (module == NULL) {
1030  return false;
1031  }
1032 
1033 #define LOAD_DLL_FUNC(name) \
1034  DLL_FUNC_VAR(name) = \
1035  reinterpret_cast<DLL_FUNC_TYPE(name)>(GetProcAddress(module, #name));
1036 
1038 
1039 #undef LOAD_DLL_FUNC
1040 
1041  // Load functions from the kernel32.dll module (the TlHelp32.h function used
1042  // to be in tlhelp32.dll but are now moved to kernel32.dll).
1043  module = LoadLibrary(TEXT("kernel32.dll"));
1044  if (module == NULL) {
1045  return false;
1046  }
1047 
1048 #define LOAD_DLL_FUNC(name) \
1049  DLL_FUNC_VAR(name) = \
1050  reinterpret_cast<DLL_FUNC_TYPE(name)>(GetProcAddress(module, #name));
1051 
1053 
1054 #undef LOAD_DLL_FUNC
1055 
1056  // Check that all functions were loaded.
1057  bool result =
1058 #define DLL_FUNC_LOADED(name) (DLL_FUNC_VAR(name) != NULL) &&
1059 
1062 
1063 #undef DLL_FUNC_LOADED
1064  true;
1065 
1066  dbghelp_loaded = result;
1067  return result;
1068  // NOTE: The modules are never unloaded and will stay around until the
1069  // application is closed.
1070 }
#define DLL_FUNC_LOADED(name)
#define TLHELP32_FUNCTION_LIST(V)
#define LOAD_DLL_FUNC(name)
#define DBGHELP_FUNCTION_LIST(V)

References DBGHELP_FUNCTION_LIST, DLL_FUNC_LOADED, LOAD_DLL_FUNC, NULL, and TLHELP32_FUNCTION_LIST.

◆ LoadSymbols()

static std::vector<OS::SharedLibraryAddress> v8::base::LoadSymbols ( HANDLE  process_handle)
static

Definition at line 1079 of file platform-win32.cc.

1080  {
1081  static std::vector<OS::SharedLibraryAddress> result;
1082 
1083  static bool symbols_loaded = false;
1084 
1085  if (symbols_loaded) return result;
1086 
1087  BOOL ok;
1088 
1089  // Initialize the symbol engine.
1090  ok = _SymInitialize(process_handle, // hProcess
1091  NULL, // UserSearchPath
1092  false); // fInvadeProcess
1093  if (!ok) return result;
1094 
1095  DWORD options = _SymGetOptions();
1096  options |= SYMOPT_LOAD_LINES;
1097  options |= SYMOPT_FAIL_CRITICAL_ERRORS;
1098  options = _SymSetOptions(options);
1099 
1100  char buf[OS::kStackWalkMaxNameLen] = {0};
1101  ok = _SymGetSearchPath(process_handle, buf, OS::kStackWalkMaxNameLen);
1102  if (!ok) {
1103  int err = GetLastError();
1104  OS::Print("%d\n", err);
1105  return result;
1106  }
1107 
1108  HANDLE snapshot = _CreateToolhelp32Snapshot(
1109  TH32CS_SNAPMODULE, // dwFlags
1110  GetCurrentProcessId()); // th32ProcessId
1111  if (snapshot == INVALID_HANDLE_VALUE) return result;
1112  MODULEENTRY32W module_entry;
1113  module_entry.dwSize = sizeof(module_entry); // Set the size of the structure.
1114  BOOL cont = _Module32FirstW(snapshot, &module_entry);
1115  while (cont) {
1116  DWORD64 base;
1117  // NOTE the SymLoadModule64 function has the peculiarity of accepting
1118  // both unicode and ASCII strings even though the parameter is PSTR.
1119  base = _SymLoadModule64(
1120  process_handle, // hProcess
1121  0, // hFile
1122  reinterpret_cast<PSTR>(module_entry.szExePath), // ImageName
1123  reinterpret_cast<PSTR>(module_entry.szModule), // ModuleName
1124  reinterpret_cast<DWORD64>(module_entry.modBaseAddr), // BaseOfDll
1125  module_entry.modBaseSize); // SizeOfDll
1126  if (base == 0) {
1127  int err = GetLastError();
1128  if (err != ERROR_MOD_NOT_FOUND &&
1129  err != ERROR_INVALID_HANDLE) {
1130  result.clear();
1131  return result;
1132  }
1133  }
1134  int lib_name_length = WideCharToMultiByte(
1135  CP_UTF8, 0, module_entry.szExePath, -1, NULL, 0, NULL, NULL);
1136  std::string lib_name(lib_name_length, 0);
1137  WideCharToMultiByte(CP_UTF8, 0, module_entry.szExePath, -1, &lib_name[0],
1138  lib_name_length, NULL, NULL);
1139  result.push_back(OS::SharedLibraryAddress(
1140  lib_name, reinterpret_cast<unsigned int>(module_entry.modBaseAddr),
1141  reinterpret_cast<unsigned int>(module_entry.modBaseAddr +
1142  module_entry.modBaseSize)));
1143  cont = _Module32NextW(snapshot, &module_entry);
1144  }
1145  CloseHandle(snapshot);
1146 
1147  symbols_loaded = true;
1148  return result;
1149 }
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be expose gc extension under the specified name show built in functions in stack traces use random jit cookie to mask large constants minimum length for automatic enable preparsing CPU profiler sampling interval in microseconds trace out of bounds accesses to external arrays default size of stack region v8 is allowed to maximum length of function source code printed in a stack trace min size of a semi the new space consists of two semi spaces print one trace line following each garbage collection do not print 
trace line after scavenger collection print cumulative GC statistics in only print modified registers Trace simulator debug messages Implied by trace sim abort randomize hashes to avoid predictable hash Fixed seed to use to hash property Print the time it takes to deserialize the snapshot A filename with extra code to be included in the snapshot(mksnapshot only)") DEFINE_STRING(raw_file
typedef HANDLE(__stdcall *DLL_FUNC_TYPE(CreateToolhelp32Snapshot))(DWORD dwFlags
typedef DWORD64(__stdcall *DLL_FUNC_TYPE(SymGetModuleBase64))(HANDLE hProcess
typedef BOOL(__stdcall *DLL_FUNC_TYPE(Module32NextW))(HANDLE hSnapshot
typedef DWORD(__stdcall *DLL_FUNC_TYPE(SymSetOptions))(IN DWORD SymOptions)

References BOOL(), DWORD(), DWORD64(), v8::base::OS::GetCurrentProcessId(), v8::base::OS::GetLastError(), HANDLE(), v8::base::OS::kStackWalkMaxNameLen, NULL, v8::base::OS::Print(), and snapshot().

+ Here is the call graph for this function:

◆ LocalKeyToPthreadKey()

static pthread_key_t v8::base::LocalKeyToPthreadKey ( Thread::LocalStorageKey  local_key)
static

Definition at line 568 of file platform-posix.cc.

568  {
569 #if V8_OS_CYGWIN
570  STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
571  intptr_t ptr_key = static_cast<intptr_t>(local_key);
572  return reinterpret_cast<pthread_key_t>(ptr_key);
573 #else
574  return static_cast<pthread_key_t>(local_key);
575 #endif
576 }
#define STATIC_ASSERT(test)
Definition: macros.h:311

References STATIC_ASSERT.

Referenced by v8::base::Thread::DeleteThreadLocalKey(), v8::base::Thread::GetThreadLocal(), and v8::base::Thread::SetThreadLocal().

+ Here is the caller graph for this function:

◆ MemoryBarrier()

void v8::base::MemoryBarrier ( )
inline

Definition at line 13 of file atomicops_internals_arm64_gcc.h.

13  {
14  __asm__ __volatile__ ("dmb ish" ::: "memory"); // NOLINT
15 }

Referenced by Acquire_CompareAndSwap(), Acquire_Store(), Barrier_AtomicIncrement(), v8::internal::SamplingCircularQueue< T, Length >::Peek(), Release_CompareAndSwap(), Release_Load(), and v8::internal::SamplingCircularQueue< T, Length >::StartEnqueue().

+ Here is the caller graph for this function:

◆ NoBarrier_AtomicExchange() [1/3]

Atomic32 v8::base::NoBarrier_AtomicExchange ( volatile Atomic32 *  ptr,
Atomic32  new_value 
)
inline

Definition at line 50 of file atomicops_internals_arm64_gcc.h.

51  {
52  Atomic32 result;
53  int32_t temp;
54 
55  __asm__ __volatile__ ( // NOLINT
56  "0: \n\t"
57  "ldxr %w[result], %[ptr] \n\t" // Load the previous value.
58  "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
59  "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
60  : [result]"=&r" (result),
61  [temp]"=&r" (temp),
62  [ptr]"+Q" (*ptr)
63  : [new_value]"r" (new_value)
64  : "memory"
65  ); // NOLINT
66 
67  return result;
68 }
int int32_t
Definition: unicode.cc:24

Referenced by NoBarrier_AtomicExchange().

+ Here is the caller graph for this function:

◆ NoBarrier_AtomicExchange() [2/3]

Atomic64 v8::base::NoBarrier_AtomicExchange ( volatile Atomic64 *  ptr,
Atomic64  new_value 
)
inline

Definition at line 200 of file atomicops_internals_arm64_gcc.h.

201  {
202  Atomic64 result;
203  int32_t temp;
204 
205  __asm__ __volatile__ ( // NOLINT
206  "0: \n\t"
207  "ldxr %[result], %[ptr] \n\t"
208  "stxr %w[temp], %[new_value], %[ptr] \n\t"
209  "cbnz %w[temp], 0b \n\t"
210  : [result]"=&r" (result),
211  [temp]"=&r" (temp),
212  [ptr]"+Q" (*ptr)
213  : [new_value]"r" (new_value)
214  : "memory"
215  ); // NOLINT
216 
217  return result;
218 }

◆ NoBarrier_AtomicExchange() [3/3]

AtomicWord v8::base::NoBarrier_AtomicExchange ( volatile AtomicWord ptr,
AtomicWord  new_value 
)
inline

Definition at line 33 of file atomicops_internals_atomicword_compat.h.

34  {
36  reinterpret_cast<volatile Atomic32*>(ptr), new_value);
37 }
AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord *ptr, AtomicWord new_value)

References NoBarrier_AtomicExchange().

+ Here is the call graph for this function:

◆ NoBarrier_AtomicIncrement() [1/3]

Atomic32 v8::base::NoBarrier_AtomicIncrement ( volatile Atomic32 ptr,
Atomic32  increment 
)
inline

Definition at line 70 of file atomicops_internals_arm64_gcc.h.

71  {
72  Atomic32 result;
73  int32_t temp;
74 
75  __asm__ __volatile__ ( // NOLINT
76  "0: \n\t"
77  "ldxr %w[result], %[ptr] \n\t" // Load the previous value.
78  "add %w[result], %w[result], %w[increment]\n\t"
79  "stxr %w[temp], %w[result], %[ptr] \n\t" // Try to store the result.
80  "cbnz %w[temp], 0b \n\t" // Retry on failure.
81  : [result]"=&r" (result),
82  [temp]"=&r" (temp),
83  [ptr]"+Q" (*ptr)
84  : [increment]"IJr" (increment)
85  : "memory"
86  ); // NOLINT
87 
88  return result;
89 }

Referenced by v8::internal::ThreadId::AllocateThreadId(), Barrier_AtomicIncrement(), v8::internal::Sampler::DecreaseProfilingDepth(), v8::internal::Sampler::IncreaseProfilingDepth(), v8::internal::Isolate::Isolate(), and NoBarrier_AtomicIncrement().

+ Here is the caller graph for this function:

◆ NoBarrier_AtomicIncrement() [2/3]

Atomic64 v8::base::NoBarrier_AtomicIncrement ( volatile Atomic64 *  ptr,
Atomic64  increment 
)
inline

Definition at line 220 of file atomicops_internals_arm64_gcc.h.

221  {
222  Atomic64 result;
223  int32_t temp;
224 
225  __asm__ __volatile__ ( // NOLINT
226  "0: \n\t"
227  "ldxr %[result], %[ptr] \n\t"
228  "add %[result], %[result], %[increment] \n\t"
229  "stxr %w[temp], %[result], %[ptr] \n\t"
230  "cbnz %w[temp], 0b \n\t"
231  : [result]"=&r" (result),
232  [temp]"=&r" (temp),
233  [ptr]"+Q" (*ptr)
234  : [increment]"IJr" (increment)
235  : "memory"
236  ); // NOLINT
237 
238  return result;
239 }

◆ NoBarrier_AtomicIncrement() [3/3]

AtomicWord v8::base::NoBarrier_AtomicIncrement ( volatile AtomicWord ptr,
AtomicWord  increment 
)
inline

Definition at line 39 of file atomicops_internals_atomicword_compat.h.

40  {
42  reinterpret_cast<volatile Atomic32*>(ptr), increment);
43 }
AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord *ptr, AtomicWord increment)

References NoBarrier_AtomicIncrement().

+ Here is the call graph for this function:

◆ NoBarrier_CompareAndSwap() [1/3]

Atomic32 v8::base::NoBarrier_CompareAndSwap ( volatile Atomic32 ptr,
Atomic32  old_value,
Atomic32  new_value 
)
inline

Definition at line 25 of file atomicops_internals_arm64_gcc.h.

27  {
28  Atomic32 prev;
29  int32_t temp;
30 
31  __asm__ __volatile__ ( // NOLINT
32  "0: \n\t"
33  "ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
34  "cmp %w[prev], %w[old_value] \n\t"
35  "bne 1f \n\t"
36  "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
37  "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
38  "1: \n\t"
39  : [prev]"=&r" (prev),
40  [temp]"=&r" (temp),
41  [ptr]"+Q" (*ptr)
42  : [old_value]"IJr" (old_value),
43  [new_value]"r" (new_value)
44  : "cc", "memory"
45  ); // NOLINT
46 
47  return prev;
48 }

Referenced by Acquire_CompareAndSwap(), NoBarrier_CompareAndSwap(), Release_CompareAndSwap(), and v8::internal::UpdatePointer().

+ Here is the caller graph for this function:

◆ NoBarrier_CompareAndSwap() [2/3]

Atomic64 v8::base::NoBarrier_CompareAndSwap ( volatile Atomic64 *  ptr,
Atomic64  old_value,
Atomic64  new_value 
)
inline

Definition at line 175 of file atomicops_internals_arm64_gcc.h.

177  {
178  Atomic64 prev;
179  int32_t temp;
180 
181  __asm__ __volatile__ ( // NOLINT
182  "0: \n\t"
183  "ldxr %[prev], %[ptr] \n\t"
184  "cmp %[prev], %[old_value] \n\t"
185  "bne 1f \n\t"
186  "stxr %w[temp], %[new_value], %[ptr] \n\t"
187  "cbnz %w[temp], 0b \n\t"
188  "1: \n\t"
189  : [prev]"=&r" (prev),
190  [temp]"=&r" (temp),
191  [ptr]"+Q" (*ptr)
192  : [old_value]"IJr" (old_value),
193  [new_value]"r" (new_value)
194  : "cc", "memory"
195  ); // NOLINT
196 
197  return prev;
198 }

◆ NoBarrier_CompareAndSwap() [3/3]

AtomicWord v8::base::NoBarrier_CompareAndSwap ( volatile AtomicWord ptr,
AtomicWord  old_value,
AtomicWord  new_value 
)
inline

Definition at line 26 of file atomicops_internals_atomicword_compat.h.

28  {
30  reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
31 }
AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord *ptr, AtomicWord old_value, AtomicWord new_value)

References NoBarrier_CompareAndSwap().

+ Here is the call graph for this function:

◆ NoBarrier_Load() [1/4]

Atomic32 v8::base::NoBarrier_Load ( volatile const Atomic32 ptr)
inline

Definition at line 150 of file atomicops_internals_arm64_gcc.h.

150  {
151  return *ptr;
152 }

◆ NoBarrier_Load() [2/4]

Atomic64 v8::base::NoBarrier_Load ( volatile const Atomic64 *  ptr)
inline

Definition at line 292 of file atomicops_internals_arm64_gcc.h.

292  {
293  return *ptr;
294 }

◆ NoBarrier_Load() [3/4]

Atomic8 v8::base::NoBarrier_Load ( volatile const Atomic8 ptr)
inline

Definition at line 146 of file atomicops_internals_arm64_gcc.h.

146  {
147  return *ptr;
148 }

Referenced by v8::internal::StoreBuffer::FindPointersToNewSpaceInRegion(), v8::internal::Sampler::IsActive(), v8::internal::Sampler::IsProfiling(), v8::internal::StoreBuffer::IteratePointersInStoreBuffer(), NoBarrier_Load(), and v8::internal::FreeListCategory::top().

+ Here is the caller graph for this function:

◆ NoBarrier_Load() [4/4]

AtomicWord v8::base::NoBarrier_Load ( volatile const AtomicWord ptr)
inline

Definition at line 80 of file atomicops_internals_atomicword_compat.h.

80  {
81  return NoBarrier_Load(
82  reinterpret_cast<volatile const Atomic32*>(ptr));
83 }
AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr)

References NoBarrier_Load().

+ Here is the call graph for this function:

◆ NoBarrier_Store() [1/4]

void v8::base::NoBarrier_Store ( volatile Atomic32 ptr,
Atomic32  value 
)
inline

Definition at line 128 of file atomicops_internals_arm64_gcc.h.

128  {
129  *ptr = value;
130 }

◆ NoBarrier_Store() [2/4]

void v8::base::NoBarrier_Store ( volatile Atomic64 *  ptr,
Atomic64  value 
)
inline

Definition at line 274 of file atomicops_internals_arm64_gcc.h.

274  {
275  *ptr = value;
276 }

◆ NoBarrier_Store() [3/4]

void v8::base::NoBarrier_Store ( volatile Atomic8 ptr,
Atomic8  value 
)
inline

◆ NoBarrier_Store() [4/4]

void v8::base::NoBarrier_Store ( volatile AtomicWord ptr,
AtomicWord  value 
)
inline

Definition at line 65 of file atomicops_internals_atomicword_compat.h.

65  {
67  reinterpret_cast<volatile Atomic32*>(ptr), value);
68 }
void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value)

References NoBarrier_Store().

+ Here is the call graph for this function:

◆ operator+() [1/2]

Time v8::base::operator+ ( const TimeDelta &  delta,
const Time &  time 
)
inline

Definition at line 283 of file time.h.

283  {
284  return time + delta;
285 }

◆ operator+() [2/2]

TimeTicks v8::base::operator+ ( const TimeDelta &  delta,
const TimeTicks &  ticks 
)
inline

Definition at line 394 of file time.h.

394  {
395  return ticks + delta;
396 }

◆ operator<<()

template<class T >
std::ostream& v8::base::operator<< ( std::ostream &  os,
const MagicNumbersForDivision< T > &  mag 
)

Definition at line 17 of file division-by-constant-unittest.cc.

18  {
19  return os << "{ multiplier: " << mag.multiplier << ", shift: " << mag.shift
20  << ", add: " << mag.add << " }";
21 }

References v8::base::MagicNumbersForDivision< T >::add, v8::base::MagicNumbersForDivision< T >::multiplier, and v8::base::MagicNumbersForDivision< T >::shift.

◆ PthreadKeyToLocalKey()

static Thread::LocalStorageKey v8::base::PthreadKeyToLocalKey ( pthread_key_t  pthread_key)
static

Definition at line 554 of file platform-posix.cc.

554  {
555 #if V8_OS_CYGWIN
556  // We need to cast pthread_key_t to Thread::LocalStorageKey in two steps
557  // because pthread_key_t is a pointer type on Cygwin. This will probably not
558  // work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway.
559  STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
560  intptr_t ptr_key = reinterpret_cast<intptr_t>(pthread_key);
561  return static_cast<Thread::LocalStorageKey>(ptr_key);
562 #else
563  return static_cast<Thread::LocalStorageKey>(pthread_key);
564 #endif
565 }

References STATIC_ASSERT.

Referenced by v8::base::Thread::CreateThreadLocalKey().

+ Here is the caller graph for this function:

◆ PVOID()

typedef v8::base::PVOID ( __stdcall *  DLL_FUNC_TYPESymFunctionTableAccess64)

◆ RandomizedVirtualAlloc() [1/2]

static void* v8::base::RandomizedVirtualAlloc ( size_t  size,
int  action,
int  protection 
)
static

Definition at line 182 of file platform-cygwin.cc.

182  {
183  LPVOID base = NULL;
184 
185  if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
186  // For executable pages, try to randomize the allocation address
187  for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
188  base = VirtualAlloc(OS::GetRandomMmapAddr(), size, action, protection);
189  }
190  }
191 
192  // After three attempts give up and let the OS find an address to use.
193  if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
194 
195  return base;
196 }

References v8::base::OS::GetRandomMmapAddr(), NULL, and size.

Referenced by v8::base::VirtualMemory::ReserveRegion().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ RandomizedVirtualAlloc() [2/2]

static void* v8::base::RandomizedVirtualAlloc ( size_t  size,
int  action,
int  protection 
)
static

Definition at line 757 of file platform-win32.cc.

757  {
758  LPVOID base = NULL;
759 
760  if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
761  // For executable pages, try to randomize the allocation address
762  for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
763  base = VirtualAlloc(OS::GetRandomMmapAddr(), size, action, protection);
764  }
765  }
766 
767  // After three attempts give up and let the OS find an address to use.
768  if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
769 
770  return base;
771 }

References v8::base::OS::GetRandomMmapAddr(), NULL, and size.

+ Here is the call graph for this function:

◆ Release_AtomicExchange() [1/2]

Atomic32 v8::base::Release_AtomicExchange ( volatile Atomic32 ptr,
Atomic32  new_value 
)
inline

Definition at line 206 of file atomicops_internals_tsan.h.

207  {
208  return __tsan_atomic32_exchange(ptr, new_value,
210 }

References __tsan_atomic32_exchange(), and __tsan_memory_order_release.

+ Here is the call graph for this function:

◆ Release_AtomicExchange() [2/2]

Atomic64 v8::base::Release_AtomicExchange ( volatile Atomic64 *  ptr,
Atomic64  new_value 
)
inline

Definition at line 295 of file atomicops_internals_tsan.h.

296  {
298 }

References __tsan_atomic64_exchange(), and __tsan_memory_order_release.

+ Here is the call graph for this function:

◆ Release_CompareAndSwap() [1/3]

Atomic32 v8::base::Release_CompareAndSwap ( volatile Atomic32 ptr,
Atomic32  old_value,
Atomic32  new_value 
)
inline

Definition at line 113 of file atomicops_internals_arm64_gcc.h.

115  {
116  Atomic32 prev;
117 
118  MemoryBarrier();
119  prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
120 
121  return prev;
122 }

References MemoryBarrier(), and NoBarrier_CompareAndSwap().

Referenced by Release_CompareAndSwap().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ Release_CompareAndSwap() [2/3]

Atomic64 v8::base::Release_CompareAndSwap ( volatile Atomic64 *  ptr,
Atomic64  old_value,
Atomic64  new_value 
)
inline

Definition at line 263 of file atomicops_internals_arm64_gcc.h.

265  {
266  Atomic64 prev;
267 
268  MemoryBarrier();
269  prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
270 
271  return prev;
272 }

References MemoryBarrier(), and NoBarrier_CompareAndSwap().

+ Here is the call graph for this function:

◆ Release_CompareAndSwap() [3/3]

AtomicWord v8::base::Release_CompareAndSwap ( volatile AtomicWord ptr,
AtomicWord  old_value,
AtomicWord  new_value 
)
inline

Definition at line 58 of file atomicops_internals_atomicword_compat.h.

60  {
62  reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
63 }
Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr, Atomic32 old_value, Atomic32 new_value)

References Release_CompareAndSwap().

+ Here is the call graph for this function:

◆ Release_Load() [1/3]

Atomic32 v8::base::Release_Load ( volatile const Atomic32 ptr)
inline

Definition at line 167 of file atomicops_internals_arm64_gcc.h.

167  {
168  MemoryBarrier();
169  return *ptr;
170 }

References MemoryBarrier().

Referenced by Release_Load().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ Release_Load() [2/3]

Atomic64 v8::base::Release_Load ( volatile const Atomic64 *  ptr)
inline

Definition at line 309 of file atomicops_internals_arm64_gcc.h.

309  {
310  MemoryBarrier();
311  return *ptr;
312 }

References MemoryBarrier().

+ Here is the call graph for this function:

◆ Release_Load() [3/3]

AtomicWord v8::base::Release_Load ( volatile const AtomicWord ptr)
inline

Definition at line 90 of file atomicops_internals_atomicword_compat.h.

90  {
92  reinterpret_cast<volatile const Atomic32*>(ptr));
93 }
Atomic32 Release_Load(volatile const Atomic32 *ptr)

References Release_Load().

+ Here is the call graph for this function:

◆ Release_Store() [1/3]

void v8::base::Release_Store ( volatile Atomic32 ptr,
Atomic32  value 
)
inline

Definition at line 137 of file atomicops_internals_arm64_gcc.h.

137  {
138  __asm__ __volatile__ ( // NOLINT
139  "stlr %w[value], %[ptr] \n\t"
140  : [ptr]"=Q" (*ptr)
141  : [value]"r" (value)
142  : "memory"
143  ); // NOLINT
144 }

Referenced by CallOnceImpl(), v8::internal::SamplingCircularQueue< T, Length >::FinishEnqueue(), v8::internal::OptimizingCompilerThread::Flush(), Release_Store(), v8::internal::SamplingCircularQueue< T, Length >::Remove(), v8::internal::OptimizingCompilerThread::Run(), v8::internal::MemoryChunk::set_next_chunk(), v8::internal::MemoryChunk::set_parallel_sweeping(), v8::internal::MemoryChunk::set_prev_chunk(), v8::internal::SweeperThread::Stop(), and v8::internal::OptimizingCompilerThread::Stop().

+ Here is the caller graph for this function:

◆ Release_Store() [2/3]

void v8::base::Release_Store ( volatile Atomic64 *  ptr,
Atomic64  value 
)
inline

Definition at line 283 of file atomicops_internals_arm64_gcc.h.

283  {
284  __asm__ __volatile__ ( // NOLINT
285  "stlr %x[value], %[ptr] \n\t"
286  : [ptr]"=Q" (*ptr)
287  : [value]"r" (value)
288  : "memory"
289  ); // NOLINT
290 }

◆ Release_Store() [3/3]

void v8::base::Release_Store ( volatile AtomicWord ptr,
AtomicWord  value 
)
inline

Definition at line 75 of file atomicops_internals_atomicword_compat.h.

75  {
77  reinterpret_cast<volatile Atomic32*>(ptr), value);
78 }

References Release_Store().

+ Here is the call graph for this function:

◆ s32()

static M32 v8::base::s32 ( int32_t  d)
static

Definition at line 30 of file division-by-constant-unittest.cc.

30  {
31  return SignedDivisionByConstant<uint32_t>(static_cast<uint32_t>(d));
32 }

Referenced by TEST().

+ Here is the caller graph for this function:

◆ s64()

static M64 v8::base::s64 ( int64_t  d)
static

Definition at line 35 of file division-by-constant-unittest.cc.

35  {
36  return SignedDivisionByConstant<uint64_t>(static_cast<uint64_t>(d));
37 }

Referenced by TEST().

+ Here is the caller graph for this function:

◆ saturated_cast()

template<typename Dst , typename Src >
Dst v8::base::saturated_cast ( Src  value)
inline

Definition at line 39 of file safe_conversions.h.

39  {
40  // Optimization for floating point values, which already saturate.
41  if (std::numeric_limits<Dst>::is_iec559)
42  return static_cast<Dst>(value);
43 
44  switch (internal::DstRangeRelationToSrcRange<Dst>(value)) {
46  return static_cast<Dst>(value);
47 
50 
52  return std::numeric_limits<Dst>::max();
53 
54  // Should fail only on attempting to assign NaN to a saturated integer.
56  CHECK(false);
57  return std::numeric_limits<Dst>::max();
58  }
59 
60  UNREACHABLE();
61  return static_cast<Dst>(value);
62 }
#define UNREACHABLE()
Definition: logging.h:30
static int min(int a, int b)
Definition: liveedit.cc:273

References CHECK, v8::internal::min(), v8::base::internal::RANGE_INVALID, v8::base::internal::RANGE_OVERFLOW, v8::base::internal::RANGE_UNDERFLOW, v8::base::internal::RANGE_VALID, and UNREACHABLE.

+ Here is the call graph for this function:

◆ SetThreadName()

static void v8::base::SetThreadName ( const char *  name)
static

Definition at line 471 of file platform-posix.cc.

471  {
472 #if V8_OS_DRAGONFLYBSD || V8_OS_FREEBSD || V8_OS_OPENBSD
473  pthread_set_name_np(pthread_self(), name);
474 #elif V8_OS_NETBSD
475  STATIC_ASSERT(Thread::kMaxThreadNameLength <= PTHREAD_MAX_NAMELEN_NP);
476  pthread_setname_np(pthread_self(), "%s", name);
477 #elif V8_OS_MACOSX
478  // pthread_setname_np is only available in 10.6 or later, so test
479  // for it at runtime.
480  int (*dynamic_pthread_setname_np)(const char*);
481  *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
482  dlsym(RTLD_DEFAULT, "pthread_setname_np");
483  if (dynamic_pthread_setname_np == NULL)
484  return;
485 
486  // Mac OS X does not expose the length limit of the name, so hardcode it.
487  static const int kMaxNameLength = 63;
488  STATIC_ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength);
489  dynamic_pthread_setname_np(name);
490 #elif defined(PR_SET_NAME)
491  prctl(PR_SET_NAME,
492  reinterpret_cast<unsigned long>(name), // NOLINT
493  0, 0, 0);
494 #endif
495 }
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be expose gc extension under the specified name show built in functions in stack traces use random jit cookie to mask large constants minimum length for automatic enable preparsing CPU profiler sampling interval in microseconds trace out of bounds accesses to external arrays default size of stack region v8 is allowed to maximum length of function source code printed in a stack trace min size of a semi the new space consists of two semi spaces print one trace line following each garbage collection do not print 
trace line after scavenger collection print cumulative GC statistics in name

References v8::base::Thread::kMaxThreadNameLength, name, NULL, and STATIC_ASSERT.

Referenced by ThreadEntry().

+ Here is the caller graph for this function:

◆ SignedDivisionByConstant() [1/3]

template<class T >
MagicNumbersForDivision< T > v8::base::SignedDivisionByConstant ( T  d)

Definition at line 23 of file division-by-constant.cc.

23  {
24  STATIC_ASSERT(static_cast<T>(0) < static_cast<T>(-1));
25  DCHECK(d != static_cast<T>(-1) && d != 0 && d != 1);
26  const unsigned bits = static_cast<unsigned>(sizeof(T)) * 8;
27  const T min = (static_cast<T>(1) << (bits - 1));
28  const bool neg = (min & d) != 0;
29  const T ad = neg ? (0 - d) : d;
30  const T t = min + (d >> (bits - 1));
31  const T anc = t - 1 - t % ad; // Absolute value of nc
32  unsigned p = bits - 1; // Init. p.
33  T q1 = min / anc; // Init. q1 = 2**p/|nc|.
34  T r1 = min - q1 * anc; // Init. r1 = rem(2**p, |nc|).
35  T q2 = min / ad; // Init. q2 = 2**p/|d|.
36  T r2 = min - q2 * ad; // Init. r2 = rem(2**p, |d|).
37  T delta;
38  do {
39  p = p + 1;
40  q1 = 2 * q1; // Update q1 = 2**p/|nc|.
41  r1 = 2 * r1; // Update r1 = rem(2**p, |nc|).
42  if (r1 >= anc) { // Must be an unsigned comparison here.
43  q1 = q1 + 1;
44  r1 = r1 - anc;
45  }
46  q2 = 2 * q2; // Update q2 = 2**p/|d|.
47  r2 = 2 * r2; // Update r2 = rem(2**p, |d|).
48  if (r2 >= ad) { // Must be an unsigned comparison here.
49  q2 = q2 + 1;
50  r2 = r2 - ad;
51  }
52  delta = ad - r2;
53  } while (q1 < delta || (q1 == delta && r1 == 0));
54  T mul = q2 + 1;
55  return {neg ? (0 - mul) : mul, p - bits, false};
56 }
#define DCHECK(condition)
Definition: logging.h:205
const Register r2
const QwNeonRegister q1
const Register r1
const QwNeonRegister q2
#define T(name, string, precedence)
Definition: token.cc:25

References DCHECK, v8::internal::min(), v8::internal::q1, v8::internal::q2, v8::internal::r1, v8::internal::r2, STATIC_ASSERT, and T.

+ Here is the call graph for this function:

◆ SignedDivisionByConstant() [2/3]

template MagicNumbersForDivision<uint32_t> v8::base::SignedDivisionByConstant ( uint32_t  d)

◆ SignedDivisionByConstant() [3/3]

template MagicNumbersForDivision<uint64_t> v8::base::SignedDivisionByConstant ( uint64_t  d)

◆ StringToLong()

static unsigned v8::base::StringToLong ( char *  buffer)
static

Definition at line 119 of file platform-freebsd.cc.

119  {
120  return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT
121 }

References NULL.

◆ TEST() [1/33]

v8::base::TEST ( ConditionVariable  ,
LoopIncrement   
)

Definition at line 269 of file condition-variable-unittest.cc.

269  {
270  static const int kMaxThreadCount = 16;
271  Mutex mutex;
272  ConditionVariable cv;
273  for (int thread_count = 1; thread_count < kMaxThreadCount; ++thread_count) {
274  int limit = thread_count * 10;
275  int counter = 0;
276 
277  // Setup the threads.
278  Thread** threads = new Thread* [thread_count];
279  for (int n = 0; n < thread_count; ++n) {
280  threads[n] = new LoopIncrementThread(n, &counter, limit, thread_count,
281  &cv, &mutex);
282  }
283 
284  // Start all threads.
285  for (int n = thread_count - 1; n >= 0; --n) {
286  threads[n]->Start();
287  }
288 
289  // Join and cleanup all threads.
290  for (int n = 0; n < thread_count; ++n) {
291  threads[n]->Join();
292  delete threads[n];
293  }
294  delete[] threads;
295 
296  EXPECT_EQ(limit, counter);
297  }
298 }

References v8::base::Thread::Join(), and v8::base::Thread::Start().

+ Here is the call graph for this function:

◆ TEST() [2/33]

v8::base::TEST ( ConditionVariable  ,
MultipleThreadsWithSeparateConditionVariables   
)

Definition at line 60 of file condition-variable-unittest.cc.

60  {
61  static const int kThreadCount = 128;
62  ThreadWithMutexAndConditionVariable threads[kThreadCount];
63 
64  for (int n = 0; n < kThreadCount; ++n) {
65  LockGuard<Mutex> lock_guard(&threads[n].mutex_);
66  EXPECT_FALSE(threads[n].running_);
67  EXPECT_FALSE(threads[n].finished_);
68  threads[n].Start();
69  // Wait for nth thread to start.
70  while (!threads[n].running_) {
71  threads[n].cv_.Wait(&threads[n].mutex_);
72  }
73  }
74 
75  for (int n = kThreadCount - 1; n >= 0; --n) {
76  LockGuard<Mutex> lock_guard(&threads[n].mutex_);
77  EXPECT_TRUE(threads[n].running_);
78  EXPECT_FALSE(threads[n].finished_);
79  }
80 
81  for (int n = 0; n < kThreadCount; ++n) {
82  LockGuard<Mutex> lock_guard(&threads[n].mutex_);
83  EXPECT_TRUE(threads[n].running_);
84  EXPECT_FALSE(threads[n].finished_);
85  // Tell the nth thread to quit.
86  threads[n].running_ = false;
87  threads[n].cv_.NotifyOne();
88  }
89 
90  for (int n = kThreadCount - 1; n >= 0; --n) {
91  // Wait for nth thread to quit.
92  LockGuard<Mutex> lock_guard(&threads[n].mutex_);
93  while (!threads[n].finished_) {
94  threads[n].cv_.Wait(&threads[n].mutex_);
95  }
96  EXPECT_FALSE(threads[n].running_);
97  EXPECT_TRUE(threads[n].finished_);
98  }
99 
100  for (int n = 0; n < kThreadCount; ++n) {
101  threads[n].Join();
102  LockGuard<Mutex> lock_guard(&threads[n].mutex_);
103  EXPECT_FALSE(threads[n].running_);
104  EXPECT_TRUE(threads[n].finished_);
105  }
106 }

◆ TEST() [3/33]

v8::base::TEST ( ConditionVariable  ,
MultipleThreadsWithSharedSeparateConditionVariables   
)

Definition at line 141 of file condition-variable-unittest.cc.

141  {
142  static const int kThreadCount = 128;
143  ThreadWithSharedMutexAndConditionVariable threads[kThreadCount];
144  ConditionVariable cv;
145  Mutex mutex;
146 
147  for (int n = 0; n < kThreadCount; ++n) {
148  threads[n].mutex_ = &mutex;
149  threads[n].cv_ = &cv;
150  }
151 
152  // Start all threads.
153  {
154  LockGuard<Mutex> lock_guard(&mutex);
155  for (int n = 0; n < kThreadCount; ++n) {
156  EXPECT_FALSE(threads[n].running_);
157  EXPECT_FALSE(threads[n].finished_);
158  threads[n].Start();
159  }
160  }
161 
162  // Wait for all threads to start.
163  {
164  LockGuard<Mutex> lock_guard(&mutex);
165  for (int n = kThreadCount - 1; n >= 0; --n) {
166  while (!threads[n].running_) {
167  cv.Wait(&mutex);
168  }
169  }
170  }
171 
172  // Make sure that all threads are running.
173  {
174  LockGuard<Mutex> lock_guard(&mutex);
175  for (int n = 0; n < kThreadCount; ++n) {
176  EXPECT_TRUE(threads[n].running_);
177  EXPECT_FALSE(threads[n].finished_);
178  }
179  }
180 
181  // Tell all threads to quit.
182  {
183  LockGuard<Mutex> lock_guard(&mutex);
184  for (int n = kThreadCount - 1; n >= 0; --n) {
185  EXPECT_TRUE(threads[n].running_);
186  EXPECT_FALSE(threads[n].finished_);
187  // Tell the nth thread to quit.
188  threads[n].running_ = false;
189  }
190  cv.NotifyAll();
191  }
192 
193  // Wait for all threads to quit.
194  {
195  LockGuard<Mutex> lock_guard(&mutex);
196  for (int n = 0; n < kThreadCount; ++n) {
197  while (!threads[n].finished_) {
198  cv.Wait(&mutex);
199  }
200  }
201  }
202 
203  // Make sure all threads are finished.
204  {
205  LockGuard<Mutex> lock_guard(&mutex);
206  for (int n = kThreadCount - 1; n >= 0; --n) {
207  EXPECT_FALSE(threads[n].running_);
208  EXPECT_TRUE(threads[n].finished_);
209  }
210  }
211 
212  // Join all threads.
213  for (int n = 0; n < kThreadCount; ++n) {
214  threads[n].Join();
215  }
216 }

◆ TEST() [4/33]

v8::base::TEST ( ConditionVariable  ,
WaitForAfterNofityOnSameThread   
)

Definition at line 14 of file condition-variable-unittest.cc.

14  {
15  for (int n = 0; n < 10; ++n) {
16  Mutex mutex;
17  ConditionVariable cv;
18 
19  LockGuard<Mutex> lock_guard(&mutex);
20 
21  cv.NotifyOne();
22  EXPECT_FALSE(cv.WaitFor(&mutex, TimeDelta::FromMicroseconds(n)));
23 
24  cv.NotifyAll();
25  EXPECT_FALSE(cv.WaitFor(&mutex, TimeDelta::FromMicroseconds(n)));
26  }
27 }

◆ TEST() [5/33]

v8::base::TEST ( CPUTest  ,
FeatureImplications   
)

Definition at line 11 of file cpu-unittest.cc.

11  {
12  CPU cpu;
13 
14  // ia32 and x64 features
15  EXPECT_TRUE(!cpu.has_sse() || cpu.has_mmx());
16  EXPECT_TRUE(!cpu.has_sse2() || cpu.has_sse());
17  EXPECT_TRUE(!cpu.has_sse3() || cpu.has_sse2());
18  EXPECT_TRUE(!cpu.has_ssse3() || cpu.has_sse3());
19  EXPECT_TRUE(!cpu.has_sse41() || cpu.has_sse3());
20  EXPECT_TRUE(!cpu.has_sse42() || cpu.has_sse41());
21 
22  // arm features
23  EXPECT_TRUE(!cpu.has_vfp3_d32() || cpu.has_vfp3());
24 }

◆ TEST() [6/33]

v8::base::TEST ( CPUTest  ,
RequiredFeatures   
)

Definition at line 27 of file cpu-unittest.cc.

27  {
28  CPU cpu;
29 
30 #if V8_HOST_ARCH_ARM
31  EXPECT_TRUE(cpu.has_fpu());
32 #endif
33 
34 #if V8_HOST_ARCH_IA32
35  EXPECT_TRUE(cpu.has_fpu());
36  EXPECT_TRUE(cpu.has_sahf());
37 #endif
38 
39 #if V8_HOST_ARCH_X64
40  EXPECT_TRUE(cpu.has_fpu());
41  EXPECT_TRUE(cpu.has_cmov());
42  EXPECT_TRUE(cpu.has_mmx());
43  EXPECT_TRUE(cpu.has_sse());
44  EXPECT_TRUE(cpu.has_sse2());
45 #endif
46 }

◆ TEST() [7/33]

v8::base::TEST ( DivisionByConstant  ,
Signed32   
)

Definition at line 44 of file division-by-constant-unittest.cc.

44  {
45  EXPECT_EQ(M32(0x99999999U, 1, false), s32(-5));
46  EXPECT_EQ(M32(0x55555555U, 1, false), s32(-3));
47  int32_t d = -1;
48  for (unsigned k = 1; k <= 32 - 1; ++k) {
49  d *= 2;
50  EXPECT_EQ(M32(0x7FFFFFFFU, k - 1, false), s32(d));
51  }
52  for (unsigned k = 1; k <= 32 - 2; ++k) {
53  EXPECT_EQ(M32(0x80000001U, k - 1, false), s32(1 << k));
54  }
55  EXPECT_EQ(M32(0x55555556U, 0, false), s32(3));
56  EXPECT_EQ(M32(0x66666667U, 1, false), s32(5));
57  EXPECT_EQ(M32(0x2AAAAAABU, 0, false), s32(6));
58  EXPECT_EQ(M32(0x92492493U, 2, false), s32(7));
59  EXPECT_EQ(M32(0x38E38E39U, 1, false), s32(9));
60  EXPECT_EQ(M32(0x66666667U, 2, false), s32(10));
61  EXPECT_EQ(M32(0x2E8BA2E9U, 1, false), s32(11));
62  EXPECT_EQ(M32(0x2AAAAAABU, 1, false), s32(12));
63  EXPECT_EQ(M32(0x51EB851FU, 3, false), s32(25));
64  EXPECT_EQ(M32(0x10624DD3U, 3, false), s32(125));
65  EXPECT_EQ(M32(0x68DB8BADU, 8, false), s32(625));
66 }
static M32 s32(int32_t d)
MagicNumbersForDivision< uint32_t > M32

References s32().

+ Here is the call graph for this function:

◆ TEST() [8/33]

v8::base::TEST ( DivisionByConstant  ,
Signed64   
)

Definition at line 88 of file division-by-constant-unittest.cc.

88  {
89  EXPECT_EQ(M64(0x9999999999999999ULL, 1, false), s64(-5));
90  EXPECT_EQ(M64(0x5555555555555555ULL, 1, false), s64(-3));
91  int64_t d = -1;
92  for (unsigned k = 1; k <= 64 - 1; ++k) {
93  d *= 2;
94  EXPECT_EQ(M64(0x7FFFFFFFFFFFFFFFULL, k - 1, false), s64(d));
95  }
96  for (unsigned k = 1; k <= 64 - 2; ++k) {
97  EXPECT_EQ(M64(0x8000000000000001ULL, k - 1, false), s64(1LL << k));
98  }
99  EXPECT_EQ(M64(0x5555555555555556ULL, 0, false), s64(3));
100  EXPECT_EQ(M64(0x6666666666666667ULL, 1, false), s64(5));
101  EXPECT_EQ(M64(0x2AAAAAAAAAAAAAABULL, 0, false), s64(6));
102  EXPECT_EQ(M64(0x4924924924924925ULL, 1, false), s64(7));
103  EXPECT_EQ(M64(0x1C71C71C71C71C72ULL, 0, false), s64(9));
104  EXPECT_EQ(M64(0x6666666666666667ULL, 2, false), s64(10));
105  EXPECT_EQ(M64(0x2E8BA2E8BA2E8BA3ULL, 1, false), s64(11));
106  EXPECT_EQ(M64(0x2AAAAAAAAAAAAAABULL, 1, false), s64(12));
107  EXPECT_EQ(M64(0xA3D70A3D70A3D70BULL, 4, false), s64(25));
108  EXPECT_EQ(M64(0x20C49BA5E353F7CFULL, 4, false), s64(125));
109  EXPECT_EQ(M64(0x346DC5D63886594BULL, 7, false), s64(625));
110 }
MagicNumbersForDivision< uint64_t > M64
static M64 s64(int64_t d)

References s64().

+ Here is the call graph for this function:

◆ TEST() [9/33]

v8::base::TEST ( DivisionByConstant  ,
Unsigned32   
)

Definition at line 69 of file division-by-constant-unittest.cc.

69  {
70  EXPECT_EQ(M32(0x00000000U, 0, true), u32(1));
71  for (unsigned k = 1; k <= 30; ++k) {
72  EXPECT_EQ(M32(1U << (32 - k), 0, false), u32(1U << k));
73  }
74  EXPECT_EQ(M32(0xAAAAAAABU, 1, false), u32(3));
75  EXPECT_EQ(M32(0xCCCCCCCDU, 2, false), u32(5));
76  EXPECT_EQ(M32(0xAAAAAAABU, 2, false), u32(6));
77  EXPECT_EQ(M32(0x24924925U, 3, true), u32(7));
78  EXPECT_EQ(M32(0x38E38E39U, 1, false), u32(9));
79  EXPECT_EQ(M32(0xCCCCCCCDU, 3, false), u32(10));
80  EXPECT_EQ(M32(0xBA2E8BA3U, 3, false), u32(11));
81  EXPECT_EQ(M32(0xAAAAAAABU, 3, false), u32(12));
82  EXPECT_EQ(M32(0x51EB851FU, 3, false), u32(25));
83  EXPECT_EQ(M32(0x10624DD3U, 3, false), u32(125));
84  EXPECT_EQ(M32(0xD1B71759U, 9, false), u32(625));
85 }
static M32 u32(uint32_t d)

References u32().

+ Here is the call graph for this function:

◆ TEST() [10/33]

v8::base::TEST ( DivisionByConstant  ,
Unsigned64   
)

Definition at line 113 of file division-by-constant-unittest.cc.

113  {
114  EXPECT_EQ(M64(0x0000000000000000ULL, 0, true), u64(1));
115  for (unsigned k = 1; k <= 64 - 2; ++k) {
116  EXPECT_EQ(M64(1ULL << (64 - k), 0, false), u64(1ULL << k));
117  }
118  EXPECT_EQ(M64(0xAAAAAAAAAAAAAAABULL, 1, false), u64(3));
119  EXPECT_EQ(M64(0xCCCCCCCCCCCCCCCDULL, 2, false), u64(5));
120  EXPECT_EQ(M64(0xAAAAAAAAAAAAAAABULL, 2, false), u64(6));
121  EXPECT_EQ(M64(0x2492492492492493ULL, 3, true), u64(7));
122  EXPECT_EQ(M64(0xE38E38E38E38E38FULL, 3, false), u64(9));
123  EXPECT_EQ(M64(0xCCCCCCCCCCCCCCCDULL, 3, false), u64(10));
124  EXPECT_EQ(M64(0x2E8BA2E8BA2E8BA3ULL, 1, false), u64(11));
125  EXPECT_EQ(M64(0xAAAAAAAAAAAAAAABULL, 3, false), u64(12));
126  EXPECT_EQ(M64(0x47AE147AE147AE15ULL, 5, true), u64(25));
127  EXPECT_EQ(M64(0x0624DD2F1A9FBE77ULL, 7, true), u64(125));
128  EXPECT_EQ(M64(0x346DC5D63886594BULL, 7, false), u64(625));
129 }
static M64 u64(uint64_t d)

References u64().

+ Here is the call graph for this function:

◆ TEST() [11/33]

v8::base::TEST ( FlagsTest  ,
BasicOperations   
)

Definition at line 31 of file flags-unittest.cc.

31  {
32  Flags1 a;
33  EXPECT_EQ(kFlag1None, static_cast<int>(a));
34  a |= kFlag1First;
35  EXPECT_EQ(kFlag1First, static_cast<int>(a));
36  a = a | kFlag1Second;
37  EXPECT_EQ(kFlag1All, static_cast<int>(a));
38  a &= kFlag1Second;
39  EXPECT_EQ(kFlag1Second, static_cast<int>(a));
40  a = kFlag1None & a;
41  EXPECT_EQ(kFlag1None, static_cast<int>(a));
42  a ^= (kFlag1All | kFlag1None);
43  EXPECT_EQ(kFlag1All, static_cast<int>(a));
44  Flags1 b = ~a;
45  EXPECT_EQ(kFlag1All, static_cast<int>(a));
46  EXPECT_EQ(~static_cast<int>(a), static_cast<int>(b));
47  Flags1 c = a;
48  EXPECT_EQ(a, c);
49  EXPECT_NE(a, b);
50  EXPECT_EQ(a, bar(a));
51  EXPECT_EQ(a, bar(kFlag1All));
52 }

References v8::base::anonymous_namespace{flags-unittest.cc}::bar(), v8::base::anonymous_namespace{flags-unittest.cc}::kFlag1All, v8::base::anonymous_namespace{flags-unittest.cc}::kFlag1First, v8::base::anonymous_namespace{flags-unittest.cc}::kFlag1None, and v8::base::anonymous_namespace{flags-unittest.cc}::kFlag1Second.

+ Here is the call graph for this function:

◆ TEST() [12/33]

v8::base::TEST ( FlagsTest  ,
ClassScope   
)

Definition at line 95 of file flags-unittest.cc.

95  {
96  Foo::Enums enums;
97  enums |= Foo::kEnum1;
98  enums |= Foo::kEnum2;
99  EXPECT_TRUE(enums & Foo::kEnum1);
100  EXPECT_TRUE(enums & Foo::kEnum2);
101 }

◆ TEST() [13/33]

◆ TEST() [14/33]

v8::base::TEST ( Mutex  ,
LockGuardLazyMutex   
)

Definition at line 29 of file mutex-unittest.cc.

29  {
30  LazyMutex lazy_mutex = LAZY_MUTEX_INITIALIZER;
31  { LockGuard<Mutex> lock_guard(lazy_mutex.Pointer()); }
32  { LockGuard<Mutex> lock_guard(lazy_mutex.Pointer()); }
33 }
#define LAZY_MUTEX_INITIALIZER
Definition: mutex.h:107
LazyStaticInstance< Mutex, DefaultConstructTrait< Mutex >, ThreadSafeInitOnceTrait >::type LazyMutex
Definition: mutex.h:105

References LAZY_MUTEX_INITIALIZER, and v8::base::LazyInstanceImpl< T, AllocationTrait, CreateTrait, InitOnceTrait, DestroyTrait >::Pointer().

+ Here is the call graph for this function:

◆ TEST() [15/33]

v8::base::TEST ( Mutex  ,
LockGuardLazyRecursiveMutex   
)

Definition at line 36 of file mutex-unittest.cc.

36  {
38  { LockGuard<RecursiveMutex> lock_guard(lazy_recursive_mutex.Pointer()); }
39  {
40  LockGuard<RecursiveMutex> lock_guard1(lazy_recursive_mutex.Pointer());
41  LockGuard<RecursiveMutex> lock_guard2(lazy_recursive_mutex.Pointer());
42  }
43 }
#define LAZY_RECURSIVE_MUTEX_INITIALIZER
Definition: mutex.h:188
LazyStaticInstance< RecursiveMutex, DefaultConstructTrait< RecursiveMutex >, ThreadSafeInitOnceTrait >::type LazyRecursiveMutex
Definition: mutex.h:186

References LAZY_RECURSIVE_MUTEX_INITIALIZER, and v8::base::LazyInstanceImpl< T, AllocationTrait, CreateTrait, InitOnceTrait, DestroyTrait >::Pointer().

+ Here is the call graph for this function:

◆ TEST() [16/33]

v8::base::TEST ( Mutex  ,
LockGuardMutex   
)

Definition at line 12 of file mutex-unittest.cc.

12  {
13  Mutex mutex;
14  { LockGuard<Mutex> lock_guard(&mutex); }
15  { LockGuard<Mutex> lock_guard(&mutex); }
16 }

◆ TEST() [17/33]

v8::base::TEST ( Mutex  ,
LockGuardRecursiveMutex   
)

Definition at line 19 of file mutex-unittest.cc.

19  {
20  RecursiveMutex recursive_mutex;
21  { LockGuard<RecursiveMutex> lock_guard(&recursive_mutex); }
22  {
23  LockGuard<RecursiveMutex> lock_guard1(&recursive_mutex);
24  LockGuard<RecursiveMutex> lock_guard2(&recursive_mutex);
25  }
26 }

◆ TEST() [18/33]

v8::base::TEST ( Mutex  ,
MultipleMutexes   
)

Definition at line 46 of file mutex-unittest.cc.

46  {
47  Mutex mutex1;
48  Mutex mutex2;
49  Mutex mutex3;
50  // Order 1
51  mutex1.Lock();
52  mutex2.Lock();
53  mutex3.Lock();
54  mutex1.Unlock();
55  mutex2.Unlock();
56  mutex3.Unlock();
57  // Order 2
58  mutex1.Lock();
59  mutex2.Lock();
60  mutex3.Lock();
61  mutex3.Unlock();
62  mutex2.Unlock();
63  mutex1.Unlock();
64 }

◆ TEST() [19/33]

v8::base::TEST ( Mutex  ,
MultipleRecursiveMutexes   
)

Definition at line 67 of file mutex-unittest.cc.

67  {
68  RecursiveMutex recursive_mutex1;
69  RecursiveMutex recursive_mutex2;
70  // Order 1
71  recursive_mutex1.Lock();
72  recursive_mutex2.Lock();
73  EXPECT_TRUE(recursive_mutex1.TryLock());
74  EXPECT_TRUE(recursive_mutex2.TryLock());
75  recursive_mutex1.Unlock();
76  recursive_mutex1.Unlock();
77  recursive_mutex2.Unlock();
78  recursive_mutex2.Unlock();
79  // Order 2
80  recursive_mutex1.Lock();
81  EXPECT_TRUE(recursive_mutex1.TryLock());
82  recursive_mutex2.Lock();
83  EXPECT_TRUE(recursive_mutex2.TryLock());
84  recursive_mutex2.Unlock();
85  recursive_mutex1.Unlock();
86  recursive_mutex2.Unlock();
87  recursive_mutex1.Unlock();
88 }

◆ TEST() [20/33]

v8::base::TEST ( OS  ,
GetCurrentProcessId   
)

Definition at line 19 of file platform-unittest.cc.

19  {
20 #if V8_OS_POSIX
21  EXPECT_EQ(static_cast<int>(getpid()), OS::GetCurrentProcessId());
22 #endif
23 
24 #if V8_OS_WIN
25  EXPECT_EQ(static_cast<int>(::GetCurrentProcessId()),
26  OS::GetCurrentProcessId());
27 #endif
28 }

References v8::base::OS::GetCurrentProcessId().

+ Here is the call graph for this function:

◆ TEST() [21/33]

v8::base::TEST ( Semaphore  ,
ProducerConsumer   
)

Definition at line 93 of file semaphore-unittest.cc.

93  {
94  char buffer[kBufferSize];
95  std::memset(buffer, 0, sizeof(buffer));
96  Semaphore free_space(kBufferSize);
97  Semaphore used_space(0);
98  ProducerThread producer_thread(buffer, &free_space, &used_space);
99  ConsumerThread consumer_thread(buffer, &free_space, &used_space);
100  producer_thread.Start();
101  consumer_thread.Start();
102  producer_thread.Join();
103  consumer_thread.Join();
104 }

References v8::base::anonymous_namespace{semaphore-unittest.cc}::kBufferSize.

◆ TEST() [22/33]

v8::base::TEST ( Semaphore  ,
WaitAndSignal   
)

Definition at line 107 of file semaphore-unittest.cc.

107  {
108  Semaphore semaphore(0);
109  WaitAndSignalThread t1(&semaphore);
110  WaitAndSignalThread t2(&semaphore);
111 
112  t1.Start();
113  t2.Start();
114 
115  // Make something available.
116  semaphore.Signal();
117 
118  t1.Join();
119  t2.Join();
120 
121  semaphore.Wait();
122 
123  EXPECT_FALSE(semaphore.WaitFor(TimeDelta::FromMicroseconds(1)));
124 }

◆ TEST() [23/33]

v8::base::TEST ( Semaphore  ,
WaitFor   
)

Definition at line 127 of file semaphore-unittest.cc.

127  {
128  Semaphore semaphore(0);
129 
130  // Semaphore not signalled - timeout.
131  ASSERT_FALSE(semaphore.WaitFor(TimeDelta::FromMicroseconds(0)));
132  ASSERT_FALSE(semaphore.WaitFor(TimeDelta::FromMicroseconds(100)));
133  ASSERT_FALSE(semaphore.WaitFor(TimeDelta::FromMicroseconds(1000)));
134 
135  // Semaphore signalled - no timeout.
136  semaphore.Signal();
137  ASSERT_TRUE(semaphore.WaitFor(TimeDelta::FromMicroseconds(0)));
138  semaphore.Signal();
139  ASSERT_TRUE(semaphore.WaitFor(TimeDelta::FromMicroseconds(100)));
140  semaphore.Signal();
141  ASSERT_TRUE(semaphore.WaitFor(TimeDelta::FromMicroseconds(1000)));
142 }

◆ TEST() [24/33]

v8::base::TEST ( SysInfoTest  ,
AmountOfVirtualMemory   
)

Definition at line 27 of file sys-info-unittest.cc.

27  {
28  EXPECT_LE(0, SysInfo::AmountOfVirtualMemory());
29 }

◆ TEST() [25/33]

v8::base::TEST ( SysInfoTest  ,
DISABLE_ON_NACL(AmountOfPhysicalMemory)   
)

Definition at line 22 of file sys-info-unittest.cc.

22  {
23  EXPECT_LT(0, SysInfo::AmountOfPhysicalMemory());
24 }

◆ TEST() [26/33]

v8::base::TEST ( SysInfoTest  ,
NumberOfProcessors   
)

Definition at line 17 of file sys-info-unittest.cc.

17  {
18  EXPECT_LT(0, SysInfo::NumberOfProcessors());
19 }

◆ TEST() [27/33]

v8::base::TEST ( Thread  ,
SelfJoin   
)

Definition at line 42 of file platform-unittest.cc.

42  {
43  SelfJoinThread thread;
44  thread.Start();
45  thread.Join();
46 }

◆ TEST() [28/33]

v8::base::TEST ( Time  ,
JsTime   
)

Definition at line 55 of file time-unittest.cc.

55  {
56  Time t = Time::FromJsTime(700000.3);
57  EXPECT_DOUBLE_EQ(700000.3, t.ToJsTime());
58 }

◆ TEST() [29/33]

v8::base::TEST ( Time  ,
NowResolution   
)

Definition at line 145 of file time-unittest.cc.

145  {
146  // We assume that Time::Now() has at least 16ms resolution.
147  static const TimeDelta kTargetGranularity = TimeDelta::FromMilliseconds(16);
148  ResolutionTest<Time>(&Time::Now, kTargetGranularity);
149 }

◆ TEST() [30/33]

v8::base::TEST ( TimeDelta  ,
FromAndIn   
)

Definition at line 24 of file time-unittest.cc.

24  {
25  EXPECT_EQ(TimeDelta::FromDays(2), TimeDelta::FromHours(48));
26  EXPECT_EQ(TimeDelta::FromHours(3), TimeDelta::FromMinutes(180));
27  EXPECT_EQ(TimeDelta::FromMinutes(2), TimeDelta::FromSeconds(120));
28  EXPECT_EQ(TimeDelta::FromSeconds(2), TimeDelta::FromMilliseconds(2000));
29  EXPECT_EQ(TimeDelta::FromMilliseconds(2), TimeDelta::FromMicroseconds(2000));
30  EXPECT_EQ(static_cast<int>(13), TimeDelta::FromDays(13).InDays());
31  EXPECT_EQ(static_cast<int>(13), TimeDelta::FromHours(13).InHours());
32  EXPECT_EQ(static_cast<int>(13), TimeDelta::FromMinutes(13).InMinutes());
33  EXPECT_EQ(static_cast<int64_t>(13), TimeDelta::FromSeconds(13).InSeconds());
34  EXPECT_DOUBLE_EQ(13.0, TimeDelta::FromSeconds(13).InSecondsF());
35  EXPECT_EQ(static_cast<int64_t>(13),
36  TimeDelta::FromMilliseconds(13).InMilliseconds());
37  EXPECT_DOUBLE_EQ(13.0, TimeDelta::FromMilliseconds(13).InMillisecondsF());
38  EXPECT_EQ(static_cast<int64_t>(13),
39  TimeDelta::FromMicroseconds(13).InMicroseconds());
40 }

◆ TEST() [31/33]

v8::base::TEST ( TimeTicks  ,
HighResolutionNowResolution   
)

Definition at line 159 of file time-unittest.cc.

159  {
160  if (!TimeTicks::IsHighResolutionClockWorking()) return;
161 
162  // We assume that TimeTicks::HighResolutionNow() has sub-ms resolution.
163  static const TimeDelta kTargetGranularity = TimeDelta::FromMilliseconds(1);
164  ResolutionTest<TimeTicks>(&TimeTicks::HighResolutionNow, kTargetGranularity);
165 }

◆ TEST() [32/33]

v8::base::TEST ( TimeTicks  ,
IsMonotonic   
)

Definition at line 168 of file time-unittest.cc.

168  {
169  TimeTicks previous_normal_ticks;
170  TimeTicks previous_highres_ticks;
171  ElapsedTimer timer;
172  timer.Start();
173  while (!timer.HasExpired(TimeDelta::FromMilliseconds(100))) {
174  TimeTicks normal_ticks = TimeTicks::Now();
175  TimeTicks highres_ticks = TimeTicks::HighResolutionNow();
176  EXPECT_GE(normal_ticks, previous_normal_ticks);
177  EXPECT_GE((normal_ticks - previous_normal_ticks).InMicroseconds(), 0);
178  EXPECT_GE(highres_ticks, previous_highres_ticks);
179  EXPECT_GE((highres_ticks - previous_highres_ticks).InMicroseconds(), 0);
180  previous_normal_ticks = normal_ticks;
181  previous_highres_ticks = highres_ticks;
182  }
183 }

◆ TEST() [33/33]

v8::base::TEST ( TimeTicks  ,
NowResolution   
)

Definition at line 152 of file time-unittest.cc.

152  {
153  // We assume that TimeTicks::Now() has at least 16ms resolution.
154  static const TimeDelta kTargetGranularity = TimeDelta::FromMilliseconds(16);
155  ResolutionTest<TimeTicks>(&TimeTicks::Now, kTargetGranularity);
156 }

◆ TEST_F()

v8::base::TEST_F ( ThreadLocalStorageTest  ,
DoTest   
)

Definition at line 103 of file platform-unittest.cc.

103  {
104  Run();
105  Start();
106  Join();
107 }

◆ TEST_P() [1/3]

v8::base::TEST_P ( RandomNumberGeneratorTest  ,
NextBooleanReturnsFalseOrTrue   
)

Definition at line 29 of file random-number-generator-unittest.cc.

29  {
30  RandomNumberGenerator rng(GetParam());
31  for (int k = 0; k < kMaxRuns; ++k) {
32  bool b = rng.NextBool();
33  EXPECT_TRUE(b == false || b == true);
34  }
35 }

References kMaxRuns.

◆ TEST_P() [2/3]

v8::base::TEST_P ( RandomNumberGeneratorTest  ,
NextDoubleReturnsValueBetween0And1   
)

Definition at line 38 of file random-number-generator-unittest.cc.

38  {
39  RandomNumberGenerator rng(GetParam());
40  for (int k = 0; k < kMaxRuns; ++k) {
41  double d = rng.NextDouble();
42  EXPECT_LE(0.0, d);
43  EXPECT_LT(d, 1.0);
44  }
45 }

References kMaxRuns.

◆ TEST_P() [3/3]

v8::base::TEST_P ( RandomNumberGeneratorTest  ,
NextIntWithMaxValue   
)

Definition at line 19 of file random-number-generator-unittest.cc.

19  {
20  RandomNumberGenerator rng(GetParam());
21  for (int max = 1; max <= kMaxRuns; ++max) {
22  int n = rng.NextInt(max);
23  EXPECT_LE(0, n);
24  EXPECT_LT(n, max);
25  }
26 }

References kMaxRuns.

◆ ThreadEntry() [1/2]

static void* v8::base::ThreadEntry ( void *  arg)
static

Definition at line 498 of file platform-posix.cc.

498  {
499  Thread* thread = reinterpret_cast<Thread*>(arg);
500  // We take the lock here to make sure that pthread_create finished first since
501  // we don't know which thread will run first (the original thread or the new
502  // one).
503  { LockGuard<Mutex> lock_guard(&thread->data()->thread_creation_mutex_); }
504  SetThreadName(thread->name());
505  DCHECK(thread->data()->thread_ != kNoThread);
506  thread->NotifyStartedAndRun();
507  return NULL;
508 }
static void SetThreadName(const char *name)

References v8::base::Thread::data(), DCHECK, kNoThread, v8::base::Thread::name(), v8::base::Thread::NotifyStartedAndRun(), NULL, SetThreadName(), v8::base::Thread::PlatformData::thread_, and v8::base::Thread::PlatformData::thread_creation_mutex_.

Referenced by v8::base::Thread::Start().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ ThreadEntry() [2/2]

static unsigned int __stdcall v8::base::ThreadEntry ( void *  arg)
static

Definition at line 1314 of file platform-win32.cc.

1314  {
1315  Thread* thread = reinterpret_cast<Thread*>(arg);
1316  thread->NotifyStartedAndRun();
1317  return 0;
1318 }

References v8::base::Thread::NotifyStartedAndRun().

+ Here is the call graph for this function:

◆ u32()

static M32 v8::base::u32 ( uint32_t  d)
static

Definition at line 40 of file division-by-constant-unittest.cc.

40 { return UnsignedDivisionByConstant<uint32_t>(d); }

Referenced by TEST().

+ Here is the caller graph for this function:

◆ u64()

static M64 v8::base::u64 ( uint64_t  d)
static

Definition at line 41 of file division-by-constant-unittest.cc.

41 { return UnsignedDivisionByConstant<uint64_t>(d); }

Referenced by TEST().

+ Here is the caller graph for this function:

◆ UnsignedDivisionByConstant() [1/3]

template<class T >
MagicNumbersForDivision< T > v8::base::UnsignedDivisionByConstant ( T  d,
unsigned  leading_zeros 
)

Definition at line 60 of file division-by-constant.cc.

61  {
62  STATIC_ASSERT(static_cast<T>(0) < static_cast<T>(-1));
63  DCHECK(d != 0);
64  const unsigned bits = static_cast<unsigned>(sizeof(T)) * 8;
65  const T ones = ~static_cast<T>(0) >> leading_zeros;
66  const T min = static_cast<T>(1) << (bits - 1);
67  const T max = ~static_cast<T>(0) >> 1;
68  const T nc = ones - (ones - d) % d;
69  bool a = false; // Init. "add" indicator.
70  unsigned p = bits - 1; // Init. p.
71  T q1 = min / nc; // Init. q1 = 2**p/nc
72  T r1 = min - q1 * nc; // Init. r1 = rem(2**p,nc)
73  T q2 = max / d; // Init. q2 = (2**p - 1)/d.
74  T r2 = max - q2 * d; // Init. r2 = rem(2**p - 1, d).
75  T delta;
76  do {
77  p = p + 1;
78  if (r1 >= nc - r1) {
79  q1 = 2 * q1 + 1;
80  r1 = 2 * r1 - nc;
81  } else {
82  q1 = 2 * q1;
83  r1 = 2 * r1;
84  }
85  if (r2 + 1 >= d - r2) {
86  if (q2 >= max) a = true;
87  q2 = 2 * q2 + 1;
88  r2 = 2 * r2 + 1 - d;
89  } else {
90  if (q2 >= min) a = true;
91  q2 = 2 * q2;
92  r2 = 2 * r2 + 1;
93  }
94  delta = d - 1 - r2;
95  } while (p < bits * 2 && (q1 < delta || (q1 == delta && r1 == 0)));
96  return {q2 + 1, p - bits, a};
97 }

References DCHECK, v8::internal::min(), v8::internal::q1, v8::internal::q2, v8::internal::r1, v8::internal::r2, STATIC_ASSERT, and T.

+ Here is the call graph for this function:

◆ UnsignedDivisionByConstant() [2/3]

template MagicNumbersForDivision<uint32_t> v8::base::UnsignedDivisionByConstant ( uint32_t  d,
unsigned  leading_zeros 
)

◆ UnsignedDivisionByConstant() [3/3]

template MagicNumbersForDivision<uint64_t> v8::base::UnsignedDivisionByConstant ( uint64_t  d,
unsigned  leading_zeros 
)

◆ VPrintHelper()

static void v8::base::VPrintHelper ( FILE *  stream,
const char *  format,
va_list  args 
)
static

Definition at line 561 of file platform-win32.cc.

561  {
562  if ((stream == stdout || stream == stderr) && !HasConsole()) {
563  // It is important to use safe print here in order to avoid
564  // overflowing the buffer. We might truncate the output, but this
565  // does not crash.
566  char buffer[4096];
567  OS::VSNPrintF(buffer, sizeof(buffer), format, args);
568  OutputDebugStringA(buffer);
569  } else {
570  vfprintf(stream, format, args);
571  }
572 }
static bool HasConsole()
int VSNPrintF(Vector< char > str, const char *format, va_list args)
Definition: utils.cc:114

References HasConsole(), and v8::base::OS::VSNPrintF().

+ Here is the call graph for this function:

Variable Documentation

◆ AtomicOps_Internalx86CPUFeatures

struct AtomicOps_x86CPUFeatureStruct v8::base::AtomicOps_Internalx86CPUFeatures
extern

◆ entropy_mutex

LazyMutex v8::base::entropy_mutex = LAZY_MUTEX_INITIALIZER
static

Definition at line 19 of file random-number-generator.cc.

◆ entropy_source

RandomNumberGenerator::EntropySource v8::base::entropy_source = NULL
static

Definition at line 20 of file random-number-generator.cc.

Referenced by v8::V8::SetEntropySource().

◆ kernel_tick_clock

Initial value:
= LAZY_STATIC_INSTANCE_INITIALIZER
(LAZY_STATIC_INSTANCE_INITIALIZER is a macro defined at lazy-instance.h:77)

Definition at line 637 of file time.cc.

◆ kMaxRuns

const int v8::base::kMaxRuns = 12345
static

Definition at line 16 of file random-number-generator-unittest.cc.

Referenced by TEST_P().

◆ kMmapFd [1/6]

const int v8::base::kMmapFd = -1
static

Definition at line 172 of file platform-freebsd.cc.

◆ kMmapFd [2/6]

const int v8::base::kMmapFd = -1
static

Definition at line 296 of file platform-linux.cc.

◆ kMmapFd [3/6]

const int v8::base::kMmapFd = VM_MAKE_TAG(255)
static

Definition at line 50 of file platform-macos.cc.

◆ kMmapFd [4/6]

const int v8::base::kMmapFd = -1
static

Definition at line 203 of file platform-openbsd.cc.

◆ kMmapFd [5/6]

const int v8::base::kMmapFd = -1
static

Definition at line 239 of file platform-qnx.cc.

◆ kMmapFd [6/6]

const int v8::base::kMmapFd = -1
static

Definition at line 144 of file platform-solaris.cc.

◆ kMmapFdOffset [1/6]

const int v8::base::kMmapFdOffset = 0
static

Definition at line 173 of file platform-freebsd.cc.

◆ kMmapFdOffset [2/6]

const int v8::base::kMmapFdOffset = 0
static

Definition at line 297 of file platform-linux.cc.

◆ kMmapFdOffset [3/6]

const off_t v8::base::kMmapFdOffset = 0
static

Definition at line 51 of file platform-macos.cc.

◆ kMmapFdOffset [4/6]

const int v8::base::kMmapFdOffset = 0
static

Definition at line 204 of file platform-openbsd.cc.

◆ kMmapFdOffset [5/6]

const int v8::base::kMmapFdOffset = 0
static

Definition at line 240 of file platform-qnx.cc.

◆ kMmapFdOffset [6/6]

const int v8::base::kMmapFdOffset = 0
static

Definition at line 145 of file platform-solaris.cc.

◆ kNoThread [1/2]

const pthread_t v8::base::kNoThread = (pthread_t) 0
static

Definition at line 43 of file platform-qnx.cc.

Referenced by v8::base::Thread::Start(), and ThreadEntry().

◆ kNoThread [2/2]

const HANDLE v8::base::kNoThread = INVALID_HANDLE_VALUE
static

Definition at line 1308 of file platform-win32.cc.

◆ output_mode

OutputMode v8::base::output_mode = UNKNOWN
static

Definition at line 539 of file platform-win32.cc.

Referenced by HasConsole().

◆ platform_random_number_generator [1/2]

LazyInstance<RandomNumberGenerator>::type v8::base::platform_random_number_generator = LAZY_INSTANCE_INITIALIZER
static

Definition at line 135 of file platform-posix.cc.

Referenced by v8::base::OS::GetRandomMmapAddr(), and v8::base::OS::Initialize().

◆ platform_random_number_generator [2/2]

LazyInstance<RandomNumberGenerator>::type v8::base::platform_random_number_generator = LAZY_INSTANCE_INITIALIZER
static

Definition at line 724 of file platform-win32.cc.