V8 Project
platform-linux.cc
Go to the documentation of this file.
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 // Platform-specific code for Linux goes here. For the POSIX-compatible
6 // parts, the implementation is in platform-posix.cc.
7 
8 #include <pthread.h>
9 #include <semaphore.h>
10 #include <signal.h>
11 #include <stdlib.h>
12 #include <sys/resource.h>
13 #include <sys/time.h>
14 #include <sys/types.h>
15 
16 // Ubuntu Dapper requires memory pages to be marked as
17 // executable. Otherwise, OS raises an exception when executing code
18 // in that page.
19 #include <errno.h>
20 #include <fcntl.h> // open
21 #include <stdarg.h>
22 #include <strings.h> // index
23 #include <sys/mman.h> // mmap & munmap
24 #include <sys/stat.h> // open
25 #include <sys/types.h> // mmap & munmap
26 #include <unistd.h> // sysconf
27 
28 // GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
29 // Old versions of the C library <signal.h> didn't define the type.
30 #if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
31  (defined(__arm__) || defined(__aarch64__)) && \
32  !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
33 #include <asm/sigcontext.h> // NOLINT
34 #endif
35 
36 #if defined(LEAK_SANITIZER)
37 #include <sanitizer/lsan_interface.h>
38 #endif
39 
40 #include <cmath>
41 
42 #undef MAP_TYPE
43 
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
46 
47 #if V8_OS_NACL
48 #if !defined(MAP_NORESERVE)
49 // PNaCL doesn't have this, so we always grab all of the memory, which is bad.
50 #define MAP_NORESERVE 0
51 #endif
52 #else
53 #include <sys/prctl.h>
54 #include <sys/syscall.h>
55 #endif
56 
57 namespace v8 {
58 namespace base {
59 
60 
61 #ifdef __arm__
62 
63 bool OS::ArmUsingHardFloat() {
64  // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
65  // the Floating Point ABI used (PCS stands for Procedure Call Standard).
66  // We use these as well as a couple of other defines to statically determine
67  // what FP ABI used.
68  // GCC versions 4.4 and below don't support hard-fp.
69  // GCC versions 4.5 may support hard-fp without defining __ARM_PCS or
70  // __ARM_PCS_VFP.
71 
72 #define GCC_VERSION (__GNUC__ * 10000 \
73  + __GNUC_MINOR__ * 100 \
74  + __GNUC_PATCHLEVEL__)
75 #if GCC_VERSION >= 40600
76 #if defined(__ARM_PCS_VFP)
77  return true;
78 #else
79  return false;
80 #endif
81 
82 #elif GCC_VERSION < 40500
83  return false;
84 
85 #else
86 #if defined(__ARM_PCS_VFP)
87  return true;
88 #elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \
89  !defined(__VFP_FP__)
90  return false;
91 #else
92 #error "Your version of GCC does not report the FP ABI compiled for." \
93  "Please report it on this issue" \
94  "http://code.google.com/p/v8/issues/detail?id=2140"
95 
96 #endif
97 #endif
98 #undef GCC_VERSION
99 }
100 
101 #endif // def __arm__
102 
103 
104 const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
105 #if V8_OS_NACL
106  // Missing support for tm_zone field.
107  return "";
108 #else
109  if (std::isnan(time)) return "";
110  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
111  struct tm* t = localtime(&tv);
112  if (NULL == t) return "";
113  return t->tm_zone;
114 #endif
115 }
116 
117 
118 double OS::LocalTimeOffset(TimezoneCache* cache) {
119 #if V8_OS_NACL
120  // Missing support for tm_zone field.
121  return 0;
122 #else
123  time_t tv = time(NULL);
124  struct tm* t = localtime(&tv);
125  // tm_gmtoff includes any daylight savings offset, so subtract it.
126  return static_cast<double>(t->tm_gmtoff * msPerSecond -
127  (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
128 #endif
129 }
130 
131 
132 void* OS::Allocate(const size_t requested,
133  size_t* allocated,
134  bool is_executable) {
135  const size_t msize = RoundUp(requested, AllocateAlignment());
136  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
137  void* addr = OS::GetRandomMmapAddr();
138  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
139  if (mbase == MAP_FAILED) return NULL;
140  *allocated = msize;
141  return mbase;
142 }
143 
144 
145 class PosixMemoryMappedFile : public OS::MemoryMappedFile {
146  public:
148  : file_(file), memory_(memory), size_(size) { }
150  virtual void* memory() { return memory_; }
151  virtual int size() { return size_; }
152  private:
153  FILE* file_;
154  void* memory_;
155  int size_;
156 };
157 
158 
159 OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
160  FILE* file = fopen(name, "r+");
161  if (file == NULL) return NULL;
162 
163  fseek(file, 0, SEEK_END);
164  int size = ftell(file);
165 
166  void* memory =
167  mmap(OS::GetRandomMmapAddr(),
168  size,
169  PROT_READ | PROT_WRITE,
170  MAP_SHARED,
171  fileno(file),
172  0);
173  return new PosixMemoryMappedFile(file, memory, size);
174 }
175 
176 
177 OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
178  void* initial) {
179  FILE* file = fopen(name, "w+");
180  if (file == NULL) return NULL;
181  int result = fwrite(initial, size, 1, file);
182  if (result < 1) {
183  fclose(file);
184  return NULL;
185  }
186  void* memory =
187  mmap(OS::GetRandomMmapAddr(),
188  size,
189  PROT_READ | PROT_WRITE,
190  MAP_SHARED,
191  fileno(file),
192  0);
193  return new PosixMemoryMappedFile(file, memory, size);
194 }
195 
196 
198  if (memory_) OS::Free(memory_, size_);
199  fclose(file_);
200 }
201 
202 
203 std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
204  std::vector<SharedLibraryAddress> result;
205  // This function assumes that the layout of the file is as follows:
206  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
207  // If we encounter an unexpected situation we abort scanning further entries.
208  FILE* fp = fopen("/proc/self/maps", "r");
209  if (fp == NULL) return result;
210 
211  // Allocate enough room to be able to store a full file name.
212  const int kLibNameLen = FILENAME_MAX + 1;
213  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
214 
215  // This loop will terminate once the scanning hits an EOF.
216  while (true) {
217  uintptr_t start, end;
218  char attr_r, attr_w, attr_x, attr_p;
219  // Parse the addresses and permission bits at the beginning of the line.
220  if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
221  if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
222 
223  int c;
224  if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
225  // Found a read-only executable entry. Skip characters until we reach
226  // the beginning of the filename or the end of the line.
227  do {
228  c = getc(fp);
229  } while ((c != EOF) && (c != '\n') && (c != '/') && (c != '['));
230  if (c == EOF) break; // EOF: Was unexpected, just exit.
231 
232  // Process the filename if found.
233  if ((c == '/') || (c == '[')) {
234  // Push the '/' or '[' back into the stream to be read below.
235  ungetc(c, fp);
236 
237  // Read to the end of the line. Exit if the read fails.
238  if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
239 
240  // Drop the newline character read by fgets. We do not need to check
241  // for a zero-length string because we know that we at least read the
242  // '/' or '[' character.
243  lib_name[strlen(lib_name) - 1] = '\0';
244  } else {
245  // No library name found, just record the raw address range.
246  snprintf(lib_name, kLibNameLen,
247  "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
248  }
249  result.push_back(SharedLibraryAddress(lib_name, start, end));
250  } else {
251  // Entry not describing executable data. Skip to end of line to set up
252  // reading the next entry.
253  do {
254  c = getc(fp);
255  } while ((c != EOF) && (c != '\n'));
256  if (c == EOF) break;
257  }
258  }
259  free(lib_name);
260  fclose(fp);
261  return result;
262 }
263 
264 
void OS::SignalCodeMovingGC() {
  // Support for ll_prof.py.
  //
  // The Linux profiler built into the kernel logs all mmap's with
  // PROT_EXEC so that analysis tools can properly attribute ticks. We
  // do a mmap with a name known by ll_prof.py and immediately munmap
  // it. This injects a GC marker into the stream of events generated
  // by the kernel and allows us to synchronize V8 code log and the
  // kernel log.
  int size = sysconf(_SC_PAGESIZE);  // Map exactly one page.
  FILE* f = fopen(OS::GetGCFakeMMapFile(), "w+");
  if (f == NULL) {
    OS::PrintError("Failed to open %s\n", OS::GetGCFakeMMapFile());
    OS::Abort();
  }
  void* addr = mmap(OS::GetRandomMmapAddr(), size,
#if V8_OS_NACL
                    // The Native Client port of V8 uses an interpreter,
                    // so code pages don't need PROT_EXEC.
                    PROT_READ,
#else
                    PROT_READ | PROT_EXEC,
#endif
                    MAP_PRIVATE, fileno(f), 0);
  DCHECK(addr != MAP_FAILED);
  // Unmap immediately: only the mmap event in the kernel log matters.
  OS::Free(addr, size);
  fclose(f);
}
293 
294 
295 // Constants used for mmap.
296 static const int kMmapFd = -1;
297 static const int kMmapFdOffset = 0;
298 
299 
// Empty placeholder: no region is reserved; IsReserved() is false.
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
301 
302 
// Reserves (without committing) |size| bytes of address space; on failure
// address_ is NULL and IsReserved() reports false.
VirtualMemory::VirtualMemory(size_t size)
    : address_(ReserveRegion(size)), size_(size) { }
305 
306 
// Reserves |size| bytes aligned to |alignment| by over-reserving
// size + alignment bytes and unmapping the unaligned head and tail.
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
    : address_(NULL), size_(0) {
  DCHECK((alignment % OS::AllocateAlignment()) == 0);
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  // PROT_NONE + MAP_NORESERVE: reserve address space only, no backing store.
  void* reservation = mmap(OS::GetRandomMmapAddr(),
                           request_size,
                           PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                           kMmapFd,
                           kMmapFdOffset);
  if (reservation == MAP_FAILED) return;  // Leaves the object unreserved.

  uint8_t* base = static_cast<uint8_t*>(reservation);
  uint8_t* aligned_base = RoundUp(base, alignment);
  DCHECK_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  DCHECK_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  DCHECK(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
#if defined(LEAK_SANITIZER)
  // Tell LeakSanitizer about the reservation.
  __lsan_register_root_region(address_, size_);
#endif
}
348 
349 
350 VirtualMemory::~VirtualMemory() {
351  if (IsReserved()) {
352  bool result = ReleaseRegion(address(), size());
353  DCHECK(result);
354  USE(result);
355  }
356 }
357 
358 
359 bool VirtualMemory::IsReserved() {
360  return address_ != NULL;
361 }
362 
363 
364 void VirtualMemory::Reset() {
365  address_ = NULL;
366  size_ = 0;
367 }
368 
369 
370 bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
371  return CommitRegion(address, size, is_executable);
372 }
373 
374 
375 bool VirtualMemory::Uncommit(void* address, size_t size) {
376  return UncommitRegion(address, size);
377 }
378 
379 
380 bool VirtualMemory::Guard(void* address) {
381  OS::Guard(address, OS::CommitPageSize());
382  return true;
383 }
384 
385 
386 void* VirtualMemory::ReserveRegion(size_t size) {
387  void* result = mmap(OS::GetRandomMmapAddr(),
388  size,
389  PROT_NONE,
390  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
391  kMmapFd,
392  kMmapFdOffset);
393 
394  if (result == MAP_FAILED) return NULL;
395 
396 #if defined(LEAK_SANITIZER)
397  __lsan_register_root_region(result, size);
398 #endif
399  return result;
400 }
401 
402 
403 bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
404 #if V8_OS_NACL
405  // The Native Client port of V8 uses an interpreter,
406  // so code pages don't need PROT_EXEC.
407  int prot = PROT_READ | PROT_WRITE;
408 #else
409  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
410 #endif
411  if (MAP_FAILED == mmap(base,
412  size,
413  prot,
414  MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
415  kMmapFd,
416  kMmapFdOffset)) {
417  return false;
418  }
419 
420  return true;
421 }
422 
423 
424 bool VirtualMemory::UncommitRegion(void* base, size_t size) {
425  return mmap(base,
426  size,
427  PROT_NONE,
428  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
429  kMmapFd,
430  kMmapFdOffset) != MAP_FAILED;
431 }
432 
433 
434 bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
435 #if defined(LEAK_SANITIZER)
436  __lsan_unregister_root_region(base, size);
437 #endif
438  return munmap(base, size) == 0;
439 }
440 
441 
// Linux backs committed pages lazily on first touch, so commits are cheap.
bool VirtualMemory::HasLazyCommits() {
  return true;
}
445 
446 } } // namespace v8::base
static MemoryMappedFile * create(const char *name, int size, void *initial)
static MemoryMappedFile * open(const char *name)
static void * GetRandomMmapAddr()
static size_t AllocateAlignment()
static void SignalCodeMovingGC()
static bool ArmUsingHardFloat()
static void Abort()
static void * Allocate(const size_t requested, size_t *allocated, bool is_executable)
static const char * LocalTimezone(double time, TimezoneCache *cache)
static std::vector< SharedLibraryAddress > GetSharedLibraryAddresses()
static double LocalTimeOffset(TimezoneCache *cache)
static void PrintError(const char *format,...)
static void Free(void *address, const size_t size)
static const int msPerSecond
Definition: platform.h:303
PosixMemoryMappedFile(FILE *file, void *memory, int size)
enable harmony numeric enable harmony object literal extensions Optimize object size
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be expose gc extension under the specified name show built in functions in stack traces use random jit cookie to mask large constants minimum length for automatic enable preparsing CPU profiler sampling interval in microseconds trace out of bounds accesses to external arrays default size of stack region v8 is allowed to maximum length of function source code printed in a stack trace min size of a semi the new space consists of two semi spaces print one trace line following each garbage collection do not print 
trace line after scavenger collection print cumulative GC statistics in name
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be expose gc extension under the specified name show built in functions in stack traces use random jit cookie to mask large constants minimum length for automatic enable preparsing CPU profiler sampling interval in microseconds trace out of bounds accesses to external arrays default size of stack region v8 is allowed to maximum length of function source code printed in a stack trace min size of a semi the new space consists of two semi spaces print one trace line following each garbage collection do not print 
trace line after scavenger collection print cumulative GC statistics in only print modified registers Trace simulator debug messages Implied by trace sim abort randomize hashes to avoid predictable hash Fixed seed to use to hash property Print the time it takes to deserialize the snapshot A filename with extra code to be included in the A file to write the raw snapshot bytes A file to write the raw context snapshot bytes Write V8 startup blob file(mksnapshot only)") DEFINE_BOOL(profile_hydrogen_code_stub_compilation
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be NULL
#define DCHECK_LE(v1, v2)
Definition: logging.h:210
#define DCHECK(condition)
Definition: logging.h:205
void USE(T)
Definition: macros.h:322
T RoundUp(T x, intptr_t m)
Definition: macros.h:407
#define V8PRIxPTR
Definition: macros.h:363
static const int kMmapFdOffset
static const int kMmapFd
const Register fp
Debugger support for the V8 JavaScript engine.
Definition: accessors.cc:20