V8 Project
platform-qnx.cc
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Platform-specific code for QNX goes here. For the POSIX-compatible
// parts the implementation is in platform-posix.cc.

#include <backtrace.h>
#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <stdlib.h>
#include <sys/resource.h>
#include <sys/time.h>
#include <sys/types.h>
#include <ucontext.h>

// QNX requires memory pages to be marked as executable.
// Otherwise, the OS raises an exception when executing code in that page.
#include <errno.h>
#include <fcntl.h>      // open
#include <stdarg.h>
#include <strings.h>    // index
#include <sys/mman.h>   // mmap & munmap
#include <sys/procfs.h>
#include <sys/stat.h>   // open
#include <sys/types.h>  // mmap & munmap
#include <unistd.h>     // sysconf

#include <cmath>

#undef MAP_TYPE

#include "src/base/macros.h"
#include "src/base/platform/platform.h"


namespace v8 {
namespace base {

// 0 is never a valid thread id on QNX since tids and pids share a
// name space and pid 0 is reserved (see man 2 kill).
static const pthread_t kNoThread = (pthread_t) 0;


#ifdef __arm__

bool OS::ArmUsingHardFloat() {
  // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
  // the Floating Point ABI used (PCS stands for Procedure Call Standard).
  // We use these as well as a couple of other defines to statically determine
  // what FP ABI is used.
  // GCC versions 4.4 and below don't support hard-fp.
  // GCC versions 4.5 may support hard-fp without defining __ARM_PCS or
  // __ARM_PCS_VFP.

#define GCC_VERSION (__GNUC__ * 10000                                         \
                     + __GNUC_MINOR__ * 100                                   \
                     + __GNUC_PATCHLEVEL__)
#if GCC_VERSION >= 40600
#if defined(__ARM_PCS_VFP)
  return true;
#else
  return false;
#endif

#elif GCC_VERSION < 40500
  return false;

#else
#if defined(__ARM_PCS_VFP)
  return true;
#elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \
    !defined(__VFP_FP__)
  return false;
#else
#error "Your version of GCC does not report the FP ABI compiled for. " \
       "Please report it on this issue: " \
       "http://code.google.com/p/v8/issues/detail?id=2140"

#endif
#endif
#undef GCC_VERSION
}

#endif  // __arm__


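// Returns the abbreviated name of the local time zone for |time| (given in
// milliseconds since the epoch), or an empty string if it cannot be
// determined.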
const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
  if (std::isnan(time)) return "";
  time_t tv = static_cast<time_t>(std::floor(time / msPerSecond));
  struct tm* t = localtime(&tv);
  if (NULL == t) return "";
  return t->tm_zone;
}


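// Returns the offset of local time from UTC in milliseconds, with any
// daylight saving component subtracted out.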
double OS::LocalTimeOffset(TimezoneCache* cache) {
  time_t tv = time(NULL);
  struct tm* t = localtime(&tv);
  // tm_gmtoff includes any daylight savings offset, so subtract it.
  return static_cast<double>(t->tm_gmtoff * msPerSecond -
                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}


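// Reserves and commits an anonymous read/write (and optionally executable)
// mapping of at least |requested| bytes, rounded up to the allocation
// alignment. Returns NULL on failure; on success *allocated receives the
// actual size of the mapping.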
void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool is_executable) {
  const size_t msize = RoundUp(requested, AllocateAlignment());
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  void* addr = OS::GetRandomMmapAddr();
  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mbase == MAP_FAILED) return NULL;
  *allocated = msize;
  return mbase;
}


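// Pairs a stdio FILE with the mmap'ed view of its contents. The out-of-line
// destructor below unmaps the memory and closes the file.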
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
    : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }
 private:
  FILE* file_;
  void* memory_;
  int size_;
};


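// Opens an existing file and maps its whole contents with shared
// read/write access.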
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
  FILE* file = fopen(name, "r+");
  if (file == NULL) return NULL;

  fseek(file, 0, SEEK_END);
  int size = ftell(file);

  void* memory =
      mmap(OS::GetRandomMmapAddr(),
           size,
           PROT_READ | PROT_WRITE,
           MAP_SHARED,
           fileno(file),
           0);
  return new PosixMemoryMappedFile(file, memory, size);
}


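// Creates (or truncates) |name|, writes |size| bytes from |initial| into it,
// and maps the file with shared read/write access.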
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
                                                   void* initial) {
  FILE* file = fopen(name, "w+");
  if (file == NULL) return NULL;
  int result = fwrite(initial, size, 1, file);
  if (result < 1) {
    fclose(file);
    return NULL;
  }
  void* memory =
      mmap(OS::GetRandomMmapAddr(),
           size,
           PROT_READ | PROT_WRITE,
           MAP_SHARED,
           fileno(file),
           0);
  return new PosixMemoryMappedFile(file, memory, size);
}


PosixMemoryMappedFile::~PosixMemoryMappedFile() {
  if (memory_) OS::Free(memory_, size_);
  fclose(file_);
}


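// Enumerates the ELF mappings of the current process by querying
// /proc/<pid>/as with devctl() and records each mapping's path and address
// range.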
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
  std::vector<SharedLibraryAddress> result;
  procfs_mapinfo *mapinfos = NULL, *mapinfo;
  int proc_fd, num, i;

  struct {
    procfs_debuginfo info;
    char buff[PATH_MAX];
  } map;

  char buf[PATH_MAX + 1];
  snprintf(buf, PATH_MAX + 1, "/proc/%d/as", getpid());

  if ((proc_fd = open(buf, O_RDONLY)) == -1) {
    close(proc_fd);
    return result;
  }

  /* Get the number of map entries. */
  if (devctl(proc_fd, DCMD_PROC_MAPINFO, NULL, 0, &num) != EOK) {
    close(proc_fd);
    return result;
  }

  mapinfos = reinterpret_cast<procfs_mapinfo *>(
      malloc(num * sizeof(procfs_mapinfo)));
  if (mapinfos == NULL) {
    close(proc_fd);
    return result;
  }

  /* Fill the map entries. */
  if (devctl(proc_fd, DCMD_PROC_PAGEDATA,
             mapinfos, num * sizeof(procfs_mapinfo), &num) != EOK) {
    free(mapinfos);
    close(proc_fd);
    return result;
  }

  for (i = 0; i < num; i++) {
    mapinfo = mapinfos + i;
    if (mapinfo->flags & MAP_ELF) {
      map.info.vaddr = mapinfo->vaddr;
      if (devctl(proc_fd, DCMD_PROC_MAPDEBUG, &map, sizeof(map), 0) != EOK) {
        continue;
      }
      result.push_back(SharedLibraryAddress(
          map.info.path, mapinfo->vaddr, mapinfo->vaddr + mapinfo->size));
    }
  }
  free(mapinfos);
  close(proc_fd);
  return result;
}


void OS::SignalCodeMovingGC() {
}


// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;


VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }


VirtualMemory::VirtualMemory(size_t size)
    : address_(ReserveRegion(size)), size_(size) { }


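// Reserves an over-sized region, then unmaps the unaligned head and any
// excess tail so that the surviving reservation starts at a multiple of
// |alignment| and spans |size| rounded up to the allocation alignment.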
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
    : address_(NULL), size_(0) {
  DCHECK((alignment % OS::AllocateAlignment()) == 0);
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  void* reservation = mmap(OS::GetRandomMmapAddr(),
                           request_size,
                           PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
                           kMmapFd,
                           kMmapFdOffset);
  if (reservation == MAP_FAILED) return;

  uint8_t* base = static_cast<uint8_t*>(reservation);
  uint8_t* aligned_base = RoundUp(base, alignment);
  DCHECK_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  DCHECK_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  DCHECK(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
}


VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    bool result = ReleaseRegion(address(), size());
    DCHECK(result);
    USE(result);
  }
}


bool VirtualMemory::IsReserved() {
  return address_ != NULL;
}


void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}


bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}


bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}


bool VirtualMemory::Guard(void* address) {
  OS::Guard(address, OS::CommitPageSize());
  return true;
}


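// Reserves an address range without committing it; the PROT_NONE | MAP_LAZY
// mapping is only turned into usable memory later by CommitRegion().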
void* VirtualMemory::ReserveRegion(size_t size) {
  void* result = mmap(OS::GetRandomMmapAddr(),
                      size,
                      PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
                      kMmapFd,
                      kMmapFdOffset);

  if (result == MAP_FAILED) return NULL;

  return result;
}


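// Commits (part of) a previously reserved range by remapping it MAP_FIXED
// with read/write (and optionally execute) permissions.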
bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(base,
                         size,
                         prot,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
                         kMmapFd,
                         kMmapFdOffset)) {
    return false;
  }

  return true;
}


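// Returns a committed range to its reserved-but-uncommitted state by
// remapping the same addresses (MAP_FIXED) with PROT_NONE and MAP_LAZY.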
bool VirtualMemory::UncommitRegion(void* base, size_t size) {
  return mmap(base,
              size,
              PROT_NONE,
              MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_LAZY,
              kMmapFd,
              kMmapFdOffset) != MAP_FAILED;
}


bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
  return munmap(base, size) == 0;
}


bool VirtualMemory::HasLazyCommits() {
  return false;
}

} }  // namespace v8::base
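A minimal usage sketch (not part of platform-qnx.cc) showing how the reserve/commit primitives above fit together. It assumes only the VirtualMemory and OS declarations from src/base/platform/platform.h; the function and variable names are illustrative:

// Reserve 1 MB of aligned address space, commit one writable page at its
// start, touch it, uncommit it, and let the destructor release the region.
void VirtualMemoryUsageSketch() {
  v8::base::VirtualMemory reservation(1024 * 1024,
                                      v8::base::OS::AllocateAlignment());
  if (!reservation.IsReserved()) return;
  void* start = reservation.address();
  size_t page = v8::base::OS::CommitPageSize();
  if (reservation.Commit(start, page, false)) {
    static_cast<char*>(start)[0] = 1;  // The committed page is now usable.
    reservation.Uncommit(start, page);
  }
  // ~VirtualMemory() calls ReleaseRegion() on the remaining reservation.
}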