time.cc
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/platform/time.h"

#if V8_OS_POSIX
#include <fcntl.h>  // for O_RDONLY
#include <sys/time.h>
#include <unistd.h>
#endif
#if V8_OS_MACOSX
#include <mach/mach_time.h>
#endif

#include <string.h>

#if V8_OS_WIN
#include "src/base/lazy-instance.h"
#include "src/base/win32-headers.h"
#endif
#include "src/base/cpu.h"
#include "src/base/logging.h"

namespace v8 {
namespace base {

TimeDelta TimeDelta::FromDays(int days) {
  return TimeDelta(days * Time::kMicrosecondsPerDay);
}


TimeDelta TimeDelta::FromHours(int hours) {
  return TimeDelta(hours * Time::kMicrosecondsPerHour);
}


TimeDelta TimeDelta::FromMinutes(int minutes) {
  return TimeDelta(minutes * Time::kMicrosecondsPerMinute);
}


TimeDelta TimeDelta::FromSeconds(int64_t seconds) {
  return TimeDelta(seconds * Time::kMicrosecondsPerSecond);
}


TimeDelta TimeDelta::FromMilliseconds(int64_t milliseconds) {
  return TimeDelta(milliseconds * Time::kMicrosecondsPerMillisecond);
}


TimeDelta TimeDelta::FromNanoseconds(int64_t nanoseconds) {
  return TimeDelta(nanoseconds / Time::kNanosecondsPerMicrosecond);
}


int TimeDelta::InDays() const {
  return static_cast<int>(delta_ / Time::kMicrosecondsPerDay);
}


int TimeDelta::InHours() const {
  return static_cast<int>(delta_ / Time::kMicrosecondsPerHour);
}


int TimeDelta::InMinutes() const {
  return static_cast<int>(delta_ / Time::kMicrosecondsPerMinute);
}


double TimeDelta::InSecondsF() const {
  return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond;
}


int64_t TimeDelta::InSeconds() const {
  return delta_ / Time::kMicrosecondsPerSecond;
}


double TimeDelta::InMillisecondsF() const {
  return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond;
}


int64_t TimeDelta::InMilliseconds() const {
  return delta_ / Time::kMicrosecondsPerMillisecond;
}


int64_t TimeDelta::InNanoseconds() const {
  return delta_ * Time::kNanosecondsPerMicrosecond;
}
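
// Illustrative note (not part of the original file): the conversions above
// use integer arithmetic on a microsecond-granular delta_, so
// FromNanoseconds() truncates any sub-microsecond remainder. For example:
//
//   TimeDelta d = TimeDelta::FromNanoseconds(1500);
//   // d.InNanoseconds() == 1000, because 1500 ns / 1000 == 1 us (truncated)
//   // and 1 us * 1000 == 1000 ns.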


#if V8_OS_MACOSX

TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) {
  DCHECK_GE(ts.tv_nsec, 0);
  DCHECK_LT(ts.tv_nsec,
            static_cast<long>(Time::kNanosecondsPerSecond));  // NOLINT
  return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
                   ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
}


struct mach_timespec TimeDelta::ToMachTimespec() const {
  struct mach_timespec ts;
  DCHECK(delta_ >= 0);
  ts.tv_sec = delta_ / Time::kMicrosecondsPerSecond;
  ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
      Time::kNanosecondsPerMicrosecond;
  return ts;
}

#endif  // V8_OS_MACOSX


#if V8_OS_POSIX

TimeDelta TimeDelta::FromTimespec(struct timespec ts) {
  DCHECK_GE(ts.tv_nsec, 0);
  DCHECK_LT(ts.tv_nsec,
            static_cast<long>(Time::kNanosecondsPerSecond));  // NOLINT
  return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
                   ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
}


struct timespec TimeDelta::ToTimespec() const {
  struct timespec ts;
  ts.tv_sec = delta_ / Time::kMicrosecondsPerSecond;
  ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
      Time::kNanosecondsPerMicrosecond;
  return ts;
}

#endif  // V8_OS_POSIX


#if V8_OS_WIN

// We implement time using the high-resolution timers so that we can get
// timeouts which are smaller than 10-15ms. To avoid any drift, we
// periodically resync the internal clock to the system clock.
class Clock FINAL {
 public:
  Clock() : initial_ticks_(GetSystemTicks()), initial_time_(GetSystemTime()) {}

  Time Now() {
    // Time between resampling the un-granular clock for this API (1 minute).
    const TimeDelta kMaxElapsedTime = TimeDelta::FromMinutes(1);

    LockGuard<Mutex> lock_guard(&mutex_);

    // Determine current time and ticks.
    TimeTicks ticks = GetSystemTicks();
    Time time = GetSystemTime();

    // Check if we need to synchronize with the system clock due to a backwards
    // time change or the amount of time elapsed.
    TimeDelta elapsed = ticks - initial_ticks_;
    if (time < initial_time_ || elapsed > kMaxElapsedTime) {
      initial_ticks_ = ticks;
      initial_time_ = time;
      return time;
    }

    return initial_time_ + elapsed;
  }

  Time NowFromSystemTime() {
    LockGuard<Mutex> lock_guard(&mutex_);
    initial_ticks_ = GetSystemTicks();
    initial_time_ = GetSystemTime();
    return initial_time_;
  }

 private:
  static TimeTicks GetSystemTicks() {
    return TimeTicks::Now();
  }

  static Time GetSystemTime() {
    FILETIME ft;
    ::GetSystemTimeAsFileTime(&ft);
    return Time::FromFiletime(ft);
  }

  TimeTicks initial_ticks_;
  Time initial_time_;
  Mutex mutex_;
};
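
// Illustrative note (not part of the original file): between resyncs,
// Clock::Now() effectively returns
//
//   initial_time_ + (TimeTicks::Now() - initial_ticks_)
//
// i.e. a wall-clock anchor advanced by the monotonic tick source. This keeps
// the sub-millisecond resolution of the tick counter while staying within one
// minute (kMaxElapsedTime) of the system clock; a backwards jump of the
// system clock forces an immediate resync.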


static LazyStaticInstance<Clock, DefaultConstructTrait<Clock>,
                          ThreadSafeInitOnceTrait>::type clock =
    LAZY_STATIC_INSTANCE_INITIALIZER;


Time Time::Now() {
  return clock.Pointer()->Now();
}


Time Time::NowFromSystemTime() {
  return clock.Pointer()->NowFromSystemTime();
}


// Time between windows epoch and standard epoch.
static const int64_t kTimeToEpochInMicroseconds = V8_INT64_C(11644473600000000);
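
// Illustrative note (not part of the original file): FILETIME counts from
// January 1, 1601 (UTC), while Time uses the standard (Unix) epoch of
// January 1, 1970 (UTC). The 369 intervening years contain 89 leap days, so
// the offset is
//
//   (369 * 365 + 89) days * 86400 s/day = 11644473600 s
//                                       = 11644473600000000 us,
//
// which matches kTimeToEpochInMicroseconds above.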


Time Time::FromFiletime(FILETIME ft) {
  if (ft.dwLowDateTime == 0 && ft.dwHighDateTime == 0) {
    return Time();
  }
  if (ft.dwLowDateTime == std::numeric_limits<DWORD>::max() &&
      ft.dwHighDateTime == std::numeric_limits<DWORD>::max()) {
    return Max();
  }
  int64_t us = (static_cast<uint64_t>(ft.dwLowDateTime) +
                (static_cast<uint64_t>(ft.dwHighDateTime) << 32)) / 10;
  return Time(us - kTimeToEpochInMicroseconds);
}


FILETIME Time::ToFiletime() const {
  DCHECK(us_ >= 0);
  FILETIME ft;
  if (IsNull()) {
    ft.dwLowDateTime = 0;
    ft.dwHighDateTime = 0;
    return ft;
  }
  if (IsMax()) {
    ft.dwLowDateTime = std::numeric_limits<DWORD>::max();
    ft.dwHighDateTime = std::numeric_limits<DWORD>::max();
    return ft;
  }
  uint64_t us = static_cast<uint64_t>(us_ + kTimeToEpochInMicroseconds) * 10;
  ft.dwLowDateTime = static_cast<DWORD>(us);
  ft.dwHighDateTime = static_cast<DWORD>(us >> 32);
  return ft;
}

#elif V8_OS_POSIX

Time Time::Now() {
  struct timeval tv;
  int result = gettimeofday(&tv, NULL);
  DCHECK_EQ(0, result);
  USE(result);
  return FromTimeval(tv);
}


Time Time::NowFromSystemTime() {
  return Now();
}


Time Time::FromTimespec(struct timespec ts) {
  DCHECK(ts.tv_nsec >= 0);
  DCHECK(ts.tv_nsec < static_cast<long>(kNanosecondsPerSecond));  // NOLINT
  if (ts.tv_nsec == 0 && ts.tv_sec == 0) {
    return Time();
  }
  if (ts.tv_nsec == static_cast<long>(kNanosecondsPerSecond - 1) &&  // NOLINT
      ts.tv_sec == std::numeric_limits<time_t>::max()) {
    return Max();
  }
  return Time(ts.tv_sec * kMicrosecondsPerSecond +
              ts.tv_nsec / kNanosecondsPerMicrosecond);
}


struct timespec Time::ToTimespec() const {
  struct timespec ts;
  if (IsNull()) {
    ts.tv_sec = 0;
    ts.tv_nsec = 0;
    return ts;
  }
  if (IsMax()) {
    ts.tv_sec = std::numeric_limits<time_t>::max();
    ts.tv_nsec = static_cast<long>(kNanosecondsPerSecond - 1);  // NOLINT
    return ts;
  }
  ts.tv_sec = us_ / kMicrosecondsPerSecond;
  ts.tv_nsec = (us_ % kMicrosecondsPerSecond) * kNanosecondsPerMicrosecond;
  return ts;
}


Time Time::FromTimeval(struct timeval tv) {
  DCHECK(tv.tv_usec >= 0);
  DCHECK(tv.tv_usec < static_cast<suseconds_t>(kMicrosecondsPerSecond));
  if (tv.tv_usec == 0 && tv.tv_sec == 0) {
    return Time();
  }
  if (tv.tv_usec == static_cast<suseconds_t>(kMicrosecondsPerSecond - 1) &&
      tv.tv_sec == std::numeric_limits<time_t>::max()) {
    return Max();
  }
  return Time(tv.tv_sec * kMicrosecondsPerSecond + tv.tv_usec);
}


struct timeval Time::ToTimeval() const {
  struct timeval tv;
  if (IsNull()) {
    tv.tv_sec = 0;
    tv.tv_usec = 0;
    return tv;
  }
  if (IsMax()) {
    tv.tv_sec = std::numeric_limits<time_t>::max();
    tv.tv_usec = static_cast<suseconds_t>(kMicrosecondsPerSecond - 1);
    return tv;
  }
  tv.tv_sec = us_ / kMicrosecondsPerSecond;
  tv.tv_usec = us_ % kMicrosecondsPerSecond;
  return tv;
}

#endif  // V8_OS_WIN


Time Time::FromJsTime(double ms_since_epoch) {
  // The epoch is a valid time, so this constructor doesn't interpret
  // 0 as the null time.
  if (ms_since_epoch == std::numeric_limits<double>::max()) {
    return Max();
  }
  return Time(
      static_cast<int64_t>(ms_since_epoch * kMicrosecondsPerMillisecond));
}


double Time::ToJsTime() const {
  if (IsNull()) {
    // Preserve 0 so the invalid result doesn't depend on the platform.
    return 0;
  }
  if (IsMax()) {
    // Preserve max without offset to prevent overflow.
    return std::numeric_limits<double>::max();
  }
  return static_cast<double>(us_) / kMicrosecondsPerMillisecond;
}
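
// Illustrative note (not part of the original file): a JS time value is
// milliseconds since the Unix epoch stored in a double, so values that are
// whole microseconds round-trip exactly:
//
//   double js_ms = 1400000000000.0;    // ms since the Unix epoch
//   Time t = Time::FromJsTime(js_ms);  // us_ == 1400000000000000
//   DCHECK_EQ(js_ms, t.ToJsTime());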


#if V8_OS_WIN

class TickClock {
 public:
  virtual ~TickClock() {}
  virtual int64_t Now() = 0;
  virtual bool IsHighResolution() = 0;
};


// Overview of time counters:
// (1) CPU cycle counter. (Retrieved via RDTSC)
// The CPU counter provides the highest resolution time stamp and is the least
// expensive to retrieve. However, the CPU counter is unreliable and should not
// be used in production. Its biggest issue is that it is per processor and it
// is not synchronized between processors. Also, on some computers, the
// counters will change frequency due to thermal and power changes, and stop
// in some states.
//
// (2) QueryPerformanceCounter (QPC). The QPC counter provides a high-
// resolution (100 nanoseconds) time stamp but is comparatively more expensive
// to retrieve. What QueryPerformanceCounter actually does is up to the HAL
// (with some help from ACPI).
// According to http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx
// in the worst case, it gets the counter from the rollover interrupt on the
// programmable interrupt timer. In best cases, the HAL may conclude that the
// RDTSC counter runs at a constant frequency, then it uses that instead. On
// multiprocessor machines, it will try to verify the values returned from
// RDTSC on each processor are consistent with each other, and apply a handful
// of workarounds for known buggy hardware. In other words, QPC is supposed to
// give consistent results on a multiprocessor computer, but it is unreliable
// in reality due to bugs in BIOS or HAL on some, especially old, computers.
// With recent updates on HAL and newer BIOS, QPC is getting more reliable but
// it should be used with caution.
//
// (3) System time. The system time provides a low-resolution (typically 10ms
// to 55 milliseconds) time stamp but is comparatively less expensive to
// retrieve and more reliable.
class HighResolutionTickClock FINAL : public TickClock {
 public:
  explicit HighResolutionTickClock(int64_t ticks_per_second)
      : ticks_per_second_(ticks_per_second) {
    DCHECK_LT(0, ticks_per_second);
  }
  virtual ~HighResolutionTickClock() {}

  virtual int64_t Now() OVERRIDE {
    LARGE_INTEGER now;
    BOOL result = QueryPerformanceCounter(&now);
    DCHECK(result);
    USE(result);

    // Intentionally calculate microseconds in a round about manner to avoid
    // overflow and precision issues. Think twice before simplifying!
    int64_t whole_seconds = now.QuadPart / ticks_per_second_;
    int64_t leftover_ticks = now.QuadPart % ticks_per_second_;
    int64_t ticks = (whole_seconds * Time::kMicrosecondsPerSecond) +
        ((leftover_ticks * Time::kMicrosecondsPerSecond) / ticks_per_second_);

    // Make sure we never return 0 here, so that TimeTicks::HighResolutionNow()
    // will never return 0.
    return ticks + 1;
  }

  virtual bool IsHighResolution() OVERRIDE {
    return true;
  }

 private:
  int64_t ticks_per_second_;
};
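
// Illustrative note (not part of the original file): the "round about"
// calculation above matters because the naive conversion
//
//   now.QuadPart * Time::kMicrosecondsPerSecond / ticks_per_second_
//
// overflows int64 once QuadPart exceeds 2^63 / 10^6, i.e. roughly 9.2e12
// ticks. For a hypothetical 10 MHz performance counter that is only about
// 9.2e5 seconds (~10.7 days) of uptime; splitting the value into whole
// seconds and leftover ticks keeps every intermediate product small.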


class RolloverProtectedTickClock FINAL : public TickClock {
 public:
  // We initialize rollover_ms_ to 1 to ensure that we will never
  // return 0 from TimeTicks::HighResolutionNow() and TimeTicks::Now() below.
  RolloverProtectedTickClock() : last_seen_now_(0), rollover_ms_(1) {}
  virtual ~RolloverProtectedTickClock() {}

  virtual int64_t Now() OVERRIDE {
    LockGuard<Mutex> lock_guard(&mutex_);
    // We use timeGetTime() to implement TimeTicks::Now(), which rolls over
    // every ~49.7 days. We try to track rollover ourselves, which works if
    // TimeTicks::Now() is called at least every 49 days.
    // Note that we do not use GetTickCount() here, since timeGetTime() gives
    // more predictable delta values, as described here:
    // http://blogs.msdn.com/b/larryosterman/archive/2009/09/02/what-s-the-difference-between-gettickcount-and-timegettime.aspx
    // timeGetTime() provides 1ms granularity when combined with
    // timeBeginPeriod(). If the host application for V8 wants fast timers, it
    // can use timeBeginPeriod() to increase the resolution.
    DWORD now = timeGetTime();
    if (now < last_seen_now_) {
      rollover_ms_ += V8_INT64_C(0x100000000);  // ~49.7 days.
    }
    last_seen_now_ = now;
    return (now + rollover_ms_) * Time::kMicrosecondsPerMillisecond;
  }

  virtual bool IsHighResolution() OVERRIDE {
    return false;
  }

 private:
  Mutex mutex_;
  DWORD last_seen_now_;
  int64_t rollover_ms_;
};
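
// Illustrative note (not part of the original file): timeGetTime() returns a
// DWORD millisecond counter, so it wraps after 2^32 ms:
//
//   4294967296 ms / 86400000 ms per day ~= 49.71 days,
//
// which is where the "~49.7 days" figure above comes from. rollover_ms_
// accumulates one 2^32 ms step per observed wrap-around.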


static LazyStaticInstance<RolloverProtectedTickClock,
                          DefaultConstructTrait<RolloverProtectedTickClock>,
                          ThreadSafeInitOnceTrait>::type tick_clock =
    LAZY_STATIC_INSTANCE_INITIALIZER;


struct CreateHighResTickClockTrait {
  static TickClock* Create() {
    // Check if the installed hardware supports a high-resolution performance
    // counter, and if not, fall back to the low-resolution tick clock.
    LARGE_INTEGER ticks_per_second;
    if (!QueryPerformanceFrequency(&ticks_per_second)) {
      return tick_clock.Pointer();
    }

    // On Athlon X2 CPUs (e.g. model 15) the QueryPerformanceCounter
    // is unreliable; fall back to the low-resolution tick clock.
    CPU cpu;
    if (strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15) {
      return tick_clock.Pointer();
    }

    return new HighResolutionTickClock(ticks_per_second.QuadPart);
  }
};


static LazyDynamicInstance<TickClock, CreateHighResTickClockTrait,
                           ThreadSafeInitOnceTrait>::type high_res_tick_clock =
    LAZY_DYNAMIC_INSTANCE_INITIALIZER;


TimeTicks TimeTicks::Now() {
  // Make sure we never return 0 here.
  TimeTicks ticks(tick_clock.Pointer()->Now());
  DCHECK(!ticks.IsNull());
  return ticks;
}


TimeTicks TimeTicks::HighResolutionNow() {
  // Make sure we never return 0 here.
  TimeTicks ticks(high_res_tick_clock.Pointer()->Now());
  DCHECK(!ticks.IsNull());
  return ticks;
}


// static
bool TimeTicks::IsHighResolutionClockWorking() {
  return high_res_tick_clock.Pointer()->IsHighResolution();
}


// static
TimeTicks TimeTicks::KernelTimestampNow() { return TimeTicks(0); }


// static
bool TimeTicks::KernelTimestampAvailable() { return false; }

#else  // V8_OS_WIN

TimeTicks TimeTicks::Now() {
  return HighResolutionNow();
}


TimeTicks TimeTicks::HighResolutionNow() {
  int64_t ticks;
#if V8_OS_MACOSX
  static struct mach_timebase_info info;
  if (info.denom == 0) {
    kern_return_t result = mach_timebase_info(&info);
    DCHECK_EQ(KERN_SUCCESS, result);
    USE(result);
  }
  ticks = (mach_absolute_time() / Time::kNanosecondsPerMicrosecond *
           info.numer / info.denom);
#elif V8_OS_SOLARIS
  ticks = (gethrtime() / Time::kNanosecondsPerMicrosecond);
#elif V8_LIBRT_NOT_AVAILABLE
  // TODO(bmeurer): This is a temporary hack to support cross-compiling
  // Chrome for Android in AOSP. Remove this once AOSP is fixed, also
  // cleanup the tools/gyp/v8.gyp file.
  struct timeval tv;
  int result = gettimeofday(&tv, NULL);
  DCHECK_EQ(0, result);
  USE(result);
  ticks = (tv.tv_sec * Time::kMicrosecondsPerSecond + tv.tv_usec);
#elif V8_OS_POSIX
  struct timespec ts;
  int result = clock_gettime(CLOCK_MONOTONIC, &ts);
  DCHECK_EQ(0, result);
  USE(result);
  ticks = (ts.tv_sec * Time::kMicrosecondsPerSecond +
           ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
#endif  // V8_OS_MACOSX
  // Make sure we never return 0 here.
  return TimeTicks(ticks + 1);
}


// static
bool TimeTicks::IsHighResolutionClockWorking() {
  return true;
}


#if V8_OS_LINUX && !V8_LIBRT_NOT_AVAILABLE

class KernelTimestampClock {
 public:
  KernelTimestampClock() : clock_fd_(-1), clock_id_(kClockInvalid) {
    clock_fd_ = open(kTraceClockDevice, O_RDONLY);
    if (clock_fd_ == -1) {
      return;
    }
    clock_id_ = get_clockid(clock_fd_);
  }

  virtual ~KernelTimestampClock() {
    if (clock_fd_ != -1) {
      close(clock_fd_);
    }
  }

  int64_t Now() {
    if (clock_id_ == kClockInvalid) {
      return 0;
    }

    struct timespec ts;

    clock_gettime(clock_id_, &ts);
    return ((int64_t)ts.tv_sec * kNsecPerSec) + ts.tv_nsec;
  }

  bool Available() { return clock_id_ != kClockInvalid; }

 private:
  static const clockid_t kClockInvalid = -1;
  static const char kTraceClockDevice[];
  static const uint64_t kNsecPerSec = 1000000000;

  int clock_fd_;
  clockid_t clock_id_;

  // Maps an open file descriptor to a dynamic POSIX clock id, mirroring the
  // Linux kernel's FD_TO_CLOCKID() macro for dynamic clock devices.
  static int get_clockid(int fd) { return ((~(clockid_t)(fd) << 3) | 3); }
};


// Timestamp module name.
const char KernelTimestampClock::kTraceClockDevice[] = "/dev/trace_clock";

#else

class KernelTimestampClock {
 public:
  KernelTimestampClock() {}

  int64_t Now() { return 0; }
  bool Available() { return false; }
};

#endif  // V8_OS_LINUX && !V8_LIBRT_NOT_AVAILABLE

static LazyStaticInstance<KernelTimestampClock,
                          DefaultConstructTrait<KernelTimestampClock>,
                          ThreadSafeInitOnceTrait>::type kernel_tick_clock =
    LAZY_STATIC_INSTANCE_INITIALIZER;


// static
TimeTicks TimeTicks::KernelTimestampNow() {
  return TimeTicks(kernel_tick_clock.Pointer()->Now());
}


// static
bool TimeTicks::KernelTimestampAvailable() {
  return kernel_tick_clock.Pointer()->Available();
}

#endif  // V8_OS_WIN

} }  // namespace v8::base