V8 Project
assembler-arm-inl.h
Go to the documentation of this file.
1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions
6 // are met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the
14 // distribution.
15 //
16 // - Neither the name of Sun Microsystems or the names of contributors may
17 // be used to endorse or promote products derived from this software without
18 // specific prior written permission.
19 //
20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31 // OF THE POSSIBILITY OF SUCH DAMAGE.
32 
33 // The original source code covered by the above license above has been modified
34 // significantly by Google Inc.
35 // Copyright 2012 the V8 project authors. All rights reserved.
36 
37 #ifndef V8_ARM_ASSEMBLER_ARM_INL_H_
38 #define V8_ARM_ASSEMBLER_ARM_INL_H_
39 
40 #include "src/arm/assembler-arm.h"
41 
42 #include "src/assembler.h"
43 #include "src/debug.h"
44 
45 
46 namespace v8 {
47 namespace internal {
48 
49 
51 
52 
55 }
56 
57 
59  return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
60 }
61 
62 
64  return kNumReservedRegisters;
65 }
66 
67 
70 }
71 
72 
73 // static
76 }
77 
78 
80  DCHECK(!reg.is(kDoubleRegZero));
82  if (reg.code() > kDoubleRegZero.code()) {
83  return reg.code() - kNumReservedRegisters;
84  }
85  return reg.code();
86 }
87 
88 
90  DCHECK(index >= 0 && index < NumAllocatableRegisters());
91  DCHECK(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
93  if (index >= kDoubleRegZero.code()) {
94  return from_code(index + kNumReservedRegisters);
95  }
96  return from_code(index);
97 }
98 
99 
100 void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
102  // absolute code pointer inside code object moves with the code object.
103  int32_t* p = reinterpret_cast<int32_t*>(pc_);
104  *p += delta; // relocate entry
105  }
106  // We do not use pc relative addressing on ARM, so there is
107  // nothing else to do.
108 }
109 
110 
111 Address RelocInfo::target_address() {
114 }
115 
116 
// Address of the slot that holds the target address (used e.g. by the
// serializer). NOTE(review): this doxygen listing is extraction-truncated —
// the leading DCHECK and parts of the condition (original lines 118/122/127)
// are missing from this view; code below is kept byte-identical.
117 Address RelocInfo::target_address_address() {
119  || rmode_ == EMBEDDED_OBJECT
120  || rmode_ == EXTERNAL_REFERENCE);
121  if (FLAG_enable_ool_constant_pool ||
123  // We return the PC for ool constant pool since this function is used by the
124  // serializer and expects the address to reside within the code object.
125  return reinterpret_cast<Address>(pc_);
126  } else {
128  return constant_pool_entry_address();
129  }
130 }
131 
132 
133 Address RelocInfo::constant_pool_entry_address() {
135  return Assembler::constant_pool_entry_address(pc_, host_->constant_pool());
136 }
137 
138 
// Size in bytes of a relocated target address on ARM: always one pointer.
139 int RelocInfo::target_address_size() {
140  return kPointerSize;
141 }
142 
143 
144 void RelocInfo::set_target_address(Address target,
145  WriteBarrierMode write_barrier_mode,
146  ICacheFlushMode icache_flush_mode) {
148  Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
149  if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
150  host() != NULL && IsCodeTarget(rmode_)) {
151  Object* target_code = Code::GetCodeFromTargetAddress(target);
152  host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
153  host(), this, HeapObject::cast(target_code));
154  }
155 }
156 
157 
158 Object* RelocInfo::target_object() {
160  return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
161 }
162 
163 
164 Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
166  return Handle<Object>(reinterpret_cast<Object**>(
168 }
169 
170 
171 void RelocInfo::set_target_object(Object* target,
172  WriteBarrierMode write_barrier_mode,
173  ICacheFlushMode icache_flush_mode) {
176  reinterpret_cast<Address>(target),
177  icache_flush_mode);
178  if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
179  host() != NULL &&
180  target->IsHeapObject()) {
181  host()->GetHeap()->incremental_marking()->RecordWrite(
182  host(), &Memory::Object_at(pc_), HeapObject::cast(target));
183  }
184 }
185 
186 
187 Address RelocInfo::target_reference() {
190 }
191 
192 
193 Address RelocInfo::target_runtime_entry(Assembler* origin) {
195  return target_address();
196 }
197 
198 
199 void RelocInfo::set_target_runtime_entry(Address target,
200  WriteBarrierMode write_barrier_mode,
201  ICacheFlushMode icache_flush_mode) {
203  if (target_address() != target)
204  set_target_address(target, write_barrier_mode, icache_flush_mode);
205 }
206 
207 
208 Handle<Cell> RelocInfo::target_cell_handle() {
210  Address address = Memory::Address_at(pc_);
211  return Handle<Cell>(reinterpret_cast<Cell**>(address));
212 }
213 
214 
215 Cell* RelocInfo::target_cell() {
218 }
219 
220 
221 void RelocInfo::set_target_cell(Cell* cell,
222  WriteBarrierMode write_barrier_mode,
223  ICacheFlushMode icache_flush_mode) {
225  Address address = cell->address() + Cell::kValueOffset;
226  Memory::Address_at(pc_) = address;
227  if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
228  // TODO(1550) We are passing NULL as a slot because cell can never be on
229  // evacuation candidate.
230  host()->GetHeap()->incremental_marking()->RecordWrite(
231  host(), NULL, cell);
232  }
233 }
234 
235 
237 
238 
// Code-age stubs are not accessed through handles on ARM; this accessor only
// exists to satisfy the platform-independent RelocInfo interface.
239 Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
240  UNREACHABLE(); // This should never be reached on Arm.
241  return Handle<Object>();
242 }
243 
244 
245 Code* RelocInfo::code_age_stub() {
250 }
251 
252 
253 void RelocInfo::set_code_age_stub(Code* stub,
254  ICacheFlushMode icache_flush_mode) {
258  stub->instruction_start();
259 }
260 
261 
262 Address RelocInfo::call_address() {
263  // The 2 instructions offset assumes patched debug break slot or return
264  // sequence.
265  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
266  (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
268 }
269 
270 
271 void RelocInfo::set_call_address(Address target) {
272  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
273  (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
275  if (host() != NULL) {
276  Object* target_code = Code::GetCodeFromTargetAddress(target);
277  host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
278  host(), this, HeapObject::cast(target_code));
279  }
280 }
281 
282 
283 Object* RelocInfo::call_object() {
284  return *call_object_address();
285 }
286 
287 
288 void RelocInfo::set_call_object(Object* target) {
289  *call_object_address() = target;
290 }
291 
292 
293 Object** RelocInfo::call_object_address() {
294  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
295  (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
296  return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
297 }
298 
299 
300 void RelocInfo::WipeOut() {
302  IsCodeTarget(rmode_) ||
306 }
307 
308 
309 bool RelocInfo::IsPatchedReturnSequence() {
310  Instr current_instr = Assembler::instr_at(pc_);
312  // A patched return sequence is:
313  // ldr ip, [pc, #0]
314  // blx ip
315  return Assembler::IsLdrPcImmediateOffset(current_instr) &&
316  Assembler::IsBlxReg(next_instr);
317 }
318 
319 
320 bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
321  Instr current_instr = Assembler::instr_at(pc_);
322  return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
323 }
324 
325 
326 void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
329  visitor->VisitEmbeddedPointer(this);
330  } else if (RelocInfo::IsCodeTarget(mode)) {
331  visitor->VisitCodeTarget(this);
332  } else if (mode == RelocInfo::CELL) {
333  visitor->VisitCell(this);
334  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
335  visitor->VisitExternalReference(this);
336  } else if (RelocInfo::IsCodeAgeSequence(mode)) {
337  visitor->VisitCodeAgeSequence(this);
338  } else if (((RelocInfo::IsJSReturn(mode) &&
339  IsPatchedReturnSequence()) ||
341  IsPatchedDebugBreakSlotSequence())) &&
342  isolate->debug()->has_break_points()) {
343  visitor->VisitDebugTarget(this);
344  } else if (RelocInfo::IsRuntimeEntry(mode)) {
345  visitor->VisitRuntimeEntry(this);
346  }
347 }
348 
349 
350 template<typename StaticVisitor>
351 void RelocInfo::Visit(Heap* heap) {
354  StaticVisitor::VisitEmbeddedPointer(heap, this);
355  } else if (RelocInfo::IsCodeTarget(mode)) {
356  StaticVisitor::VisitCodeTarget(heap, this);
357  } else if (mode == RelocInfo::CELL) {
358  StaticVisitor::VisitCell(heap, this);
359  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
360  StaticVisitor::VisitExternalReference(this);
361  } else if (RelocInfo::IsCodeAgeSequence(mode)) {
362  StaticVisitor::VisitCodeAgeSequence(heap, this);
363  } else if (heap->isolate()->debug()->has_break_points() &&
365  IsPatchedReturnSequence()) ||
367  IsPatchedDebugBreakSlotSequence()))) {
368  StaticVisitor::VisitDebugTarget(heap, this);
369  } else if (RelocInfo::IsRuntimeEntry(mode)) {
370  StaticVisitor::VisitRuntimeEntry(this);
371  }
372 }
373 
374 
375 Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
376  rm_ = no_reg;
377  imm32_ = immediate;
378  rmode_ = rmode;
379 }
380 
381 
382 Operand::Operand(const ExternalReference& f) {
383  rm_ = no_reg;
384  imm32_ = reinterpret_cast<int32_t>(f.address());
386 }
387 
388 
389 Operand::Operand(Smi* value) {
390  rm_ = no_reg;
391  imm32_ = reinterpret_cast<intptr_t>(value);
392  rmode_ = RelocInfo::NONE32;
393 }
394 
395 
396 Operand::Operand(Register rm) {
397  rm_ = rm;
398  rs_ = no_reg;
399  shift_op_ = LSL;
400  shift_imm_ = 0;
401 }
402 
403 
404 bool Operand::is_reg() const {
405  return rm_.is_valid() &&
406  rs_.is(no_reg) &&
407  shift_op_ == LSL &&
408  shift_imm_ == 0;
409 }
410 
411 
413  if (buffer_space() <= kGap) {
414  GrowBuffer();
415  }
416  if (pc_offset() >= next_buffer_check_) {
417  CheckConstPool(false, true);
418  }
419 }
420 
421 
423  CheckBuffer();
424  *reinterpret_cast<Instr*>(pc_) = x;
425  pc_ += kInstrSize;
426 }
427 
428 
430  // Returns the address of the call target from the return address that will
431  // be returned to after a call.
432  // Call sequence on V7 or later is:
433  // movw ip, #... @ call address low 16
434  // movt ip, #... @ call address high 16
435  // blx ip
436  // @ return address
437  // For V6 when the constant pool is unavailable, it is:
438  // mov ip, #... @ call address low 8
439  // orr ip, ip, #... @ call address 2nd 8
440  // orr ip, ip, #... @ call address 3rd 8
441  // orr ip, ip, #... @ call address high 8
442  // blx ip
443  // @ return address
444  // In cases that need frequent patching, the address is in the
445  // constant pool. It could be a small constant pool load:
446  // ldr ip, [pc / pp, #...] @ call address
447  // blx ip
448  // @ return address
449  // Or an extended constant pool load (ARMv7):
450  // movw ip, #...
451  // movt ip, #...
452  // ldr ip, [pc, ip] @ call address
453  // blx ip
454  // @ return address
455  // Or an extended constant pool load (ARMv6):
456  // mov ip, #...
457  // orr ip, ip, #...
458  // orr ip, ip, #...
459  // orr ip, ip, #...
460  // ldr ip, [pc, ip] @ call address
461  // blx ip
462  // @ return address
463  Address candidate = pc - 2 * Assembler::kInstrSize;
464  Instr candidate_instr(Memory::int32_at(candidate));
465  if (IsLdrPcImmediateOffset(candidate_instr) |
466  IsLdrPpImmediateOffset(candidate_instr)) {
467  return candidate;
468  } else {
469  if (IsLdrPpRegOffset(candidate_instr)) {
470  candidate -= Assembler::kInstrSize;
471  }
473  candidate -= 1 * Assembler::kInstrSize;
474  DCHECK(IsMovW(Memory::int32_at(candidate)) &&
476  } else {
477  candidate -= 3 * Assembler::kInstrSize;
478  DCHECK(
479  IsMovImmed(Memory::int32_at(candidate)) &&
483  }
484  return candidate;
485  }
486 }
487 
488 
491 }
492 
493 
497  // Load from constant pool, small section.
498  return pc + kInstrSize * 2;
499  } else {
504  // Load from constant pool, extended section.
505  return pc + kInstrSize * 4;
506  } else {
507  // A movw / movt load immediate.
508  return pc + kInstrSize * 3;
509  }
510  } else {
516  // Load from constant pool, extended section.
517  return pc + kInstrSize * 6;
518  } else {
519  // A mov / orr load immediate.
520  return pc + kInstrSize * 5;
521  }
522  }
523  }
524 }
525 
526 
528  Address constant_pool_entry, Code* code, Address target) {
529  if (FLAG_enable_ool_constant_pool) {
530  set_target_address_at(constant_pool_entry, code, target);
531  } else {
532  Memory::Address_at(constant_pool_entry) = target;
533  }
534 }
535 
536 
537 bool Assembler::is_constant_pool_load(Address pc) {
540  (FLAG_enable_ool_constant_pool &&
543  } else {
545  (FLAG_enable_ool_constant_pool &&
548  }
549 }
550 
551 
552 Address Assembler::constant_pool_entry_address(
553  Address pc, ConstantPoolArray* constant_pool) {
554  if (FLAG_enable_ool_constant_pool) {
555  DCHECK(constant_pool != NULL);
556  int cp_offset;
562  // This is an extended constant pool lookup (ARMv6).
563  Instr mov_instr = instr_at(pc);
564  Instr orr_instr_1 = instr_at(pc + kInstrSize);
565  Instr orr_instr_2 = instr_at(pc + 2 * kInstrSize);
566  Instr orr_instr_3 = instr_at(pc + 3 * kInstrSize);
567  cp_offset = DecodeShiftImm(mov_instr) | DecodeShiftImm(orr_instr_1) |
568  DecodeShiftImm(orr_instr_2) | DecodeShiftImm(orr_instr_3);
569  } else if (IsMovW(Memory::int32_at(pc))) {
572  // This is an extended constant pool lookup (ARMv7).
573  Instruction* movw_instr = Instruction::At(pc);
574  Instruction* movt_instr = Instruction::At(pc + kInstrSize);
575  cp_offset = (movt_instr->ImmedMovwMovtValue() << 16) |
576  movw_instr->ImmedMovwMovtValue();
577  } else {
578  // This is a small constant pool lookup.
581  }
582  return reinterpret_cast<Address>(constant_pool) + cp_offset;
583  } else {
585  Instr instr = Memory::int32_at(pc);
587  }
588 }
589 
590 
592  ConstantPoolArray* constant_pool) {
593  if (is_constant_pool_load(pc)) {
594  // This is a constant pool lookup. Return the value in the constant pool.
595  return Memory::Address_at(constant_pool_entry_address(pc, constant_pool));
596  } else if (CpuFeatures::IsSupported(ARMv7)) {
597  // This is an movw / movt immediate load. Return the immediate.
600  Instruction* movw_instr = Instruction::At(pc);
601  Instruction* movt_instr = Instruction::At(pc + kInstrSize);
602  return reinterpret_cast<Address>(
603  (movt_instr->ImmedMovwMovtValue() << 16) |
604  movw_instr->ImmedMovwMovtValue());
605  } else {
606  // This is an mov / orr immediate load. Return the immediate.
611  Instr mov_instr = instr_at(pc);
612  Instr orr_instr_1 = instr_at(pc + kInstrSize);
613  Instr orr_instr_2 = instr_at(pc + 2 * kInstrSize);
614  Instr orr_instr_3 = instr_at(pc + 3 * kInstrSize);
615  Address ret = reinterpret_cast<Address>(
616  DecodeShiftImm(mov_instr) | DecodeShiftImm(orr_instr_1) |
617  DecodeShiftImm(orr_instr_2) | DecodeShiftImm(orr_instr_3));
618  return ret;
619  }
620 }
621 
622 
624  ConstantPoolArray* constant_pool,
625  Address target,
626  ICacheFlushMode icache_flush_mode) {
627  if (is_constant_pool_load(pc)) {
628  // This is a constant pool lookup. Update the entry in the constant pool.
629  Memory::Address_at(constant_pool_entry_address(pc, constant_pool)) = target;
630  // Intuitively, we would think it is necessary to always flush the
631  // instruction cache after patching a target address in the code as follows:
632  // CpuFeatures::FlushICache(pc, sizeof(target));
633  // However, on ARM, no instruction is actually patched in the case
634  // of embedded constants of the form:
635  // ldr ip, [pp, #...]
636  // since the instruction accessing this address in the constant pool remains
637  // unchanged.
638  } else if (CpuFeatures::IsSupported(ARMv7)) {
639  // This is an movw / movt immediate load. Patch the immediate embedded in
640  // the instructions.
643  uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
644  uint32_t immediate = reinterpret_cast<uint32_t>(target);
645  instr_ptr[0] = PatchMovwImmediate(instr_ptr[0], immediate & 0xFFFF);
646  instr_ptr[1] = PatchMovwImmediate(instr_ptr[1], immediate >> 16);
649  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
651  }
652  } else {
653  // This is an mov / orr immediate load. Patch the immediate embedded in
654  // the instructions.
659  uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
660  uint32_t immediate = reinterpret_cast<uint32_t>(target);
661  instr_ptr[0] = PatchShiftImm(instr_ptr[0], immediate & kImm8Mask);
662  instr_ptr[1] = PatchShiftImm(instr_ptr[1], immediate & (kImm8Mask << 8));
663  instr_ptr[2] = PatchShiftImm(instr_ptr[2], immediate & (kImm8Mask << 16));
664  instr_ptr[3] = PatchShiftImm(instr_ptr[3], immediate & (kImm8Mask << 24));
669  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
671  }
672  }
673 }
674 
675 
676 } } // namespace v8::internal
677 
678 #endif // V8_ARM_ASSEMBLER_ARM_INL_H_
#define kDoubleRegZero
#define kScratchDoubleReg
static const int kInstrSize
Instruction * pc() const
static Address target_address_at(Address pc, ConstantPoolArray *constant_pool)
static bool IsNop(Instr instr, int type=NON_MARKING_NOP)
void CheckConstPool(bool force_emit, bool require_jump)
static const int kPatchDebugBreakSlotReturnOffset
static Address break_address_from_return_address(Address pc)
static const int kGap
static bool IsMovImmed(Instr instr)
static Instr PatchShiftImm(Instr instr, int immed)
static bool IsMovW(Instr instr)
static bool IsMovT(Instr instr)
static void set_target_address_at(Address pc, ConstantPoolArray *constant_pool, Address target, ICacheFlushMode icache_flush_mode=FLUSH_ICACHE_IF_NEEDED)
static Address target_address_from_return_address(Address pc)
static bool IsOrrImmed(Instr instr)
static Address return_address_from_call_start(Address pc)
static int DecodeShiftImm(Instr instr)
static bool IsLdrPpRegOffset(Instr instr)
static bool IsLdrPcImmediateOffset(Instr instr)
static void deserialization_set_special_target_at(Address constant_pool_entry, Code *code, Address target)
static bool IsLdrPpImmediateOffset(Instr instr)
static bool IsBlxReg(Instr instr)
static const int kPcLoadDelta
static Instr PatchMovwImmediate(Instr instruction, uint32_t immediate)
static int GetLdrRegisterImmediateOffset(Instr instr)
void ret(const Register &xn=lr)
static const int kValueOffset
Definition: objects.h:9446
static Cell * FromValueAddress(Address value)
Definition: objects.h:9431
ConstantPoolArray * constant_pool()
Definition: objects-inl.h:4942
static Code * GetCodeFromTargetAddress(Address address)
Definition: objects-inl.h:5018
static void FlushICache(void *start, size_t size)
static bool IsSupported(CpuFeature f)
Definition: assembler.h:184
bool has_break_points() const
Definition: debug.h:467
Heap * GetHeap() const
Definition: objects-inl.h:1379
Isolate * isolate()
Definition: heap-inl.h:589
IncrementalMarking * incremental_marking()
Definition: heap.h:1205
static Instruction * At(byte *pc)
static Object *& Object_at(Address addr)
Definition: v8memory.h:60
static Address & Address_at(Address addr)
Definition: v8memory.h:56
static int32_t & int32_at(Address addr)
Definition: v8memory.h:28
Operand(Register reg, Shift shift=LSL, unsigned shift_amount=0)
Immediate immediate() const
static bool IsDebugBreakSlot(Mode mode)
Definition: assembler.h:436
static bool IsJSReturn(Mode mode)
Definition: assembler.h:412
static bool IsEmbeddedObject(Mode mode)
Definition: assembler.h:402
static bool IsRuntimeEntry(Mode mode)
Definition: assembler.h:405
static bool IsCodeTarget(Mode mode)
Definition: assembler.h:399
static bool IsExternalReference(Mode mode)
Definition: assembler.h:430
Code * host() const
Definition: assembler.h:463
Mode rmode() const
Definition: assembler.h:459
static bool IsInternalReference(Mode mode)
Definition: assembler.h:433
static bool IsCodeAgeSequence(Mode mode)
Definition: assembler.h:442
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_BOOL(enable_always_align_csp
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be NULL
#define UNREACHABLE()
Definition: logging.h:30
#define DCHECK(condition)
Definition: logging.h:205
int int32_t
Definition: unicode.cc:24
const int kPointerSize
Definition: globals.h:129
@ UPDATE_WRITE_BARRIER
Definition: objects.h:235
kSerializedDataOffset Object
Definition: objects-inl.h:5322
const Register pc
byte * Address
Definition: globals.h:101
const Register no_reg
@ SKIP_ICACHE_FLUSH
Definition: assembler.h:293
static const int kNoCodeAgeSequenceLength
Debugger support for the V8 JavaScript engine.
Definition: accessors.cc:20
static const int kNumReservedRegisters
bool is(DwVfpRegister reg) const
static DwVfpRegister from_code(int code)
static int ToAllocationIndex(DwVfpRegister reg)
static int NumAllocatableAliasedRegisters()
static DwVfpRegister FromAllocationIndex(int index)
static const int kMaxNumLowRegisters
static int NumAllocatableRegisters()
static const int kMaxNumAllocatableRegisters
Definition: assembler-arm.h:96