// V8 Project — src/arm/assembler-arm.cc
// (Recovered from a generated documentation listing of this file.)
1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions
6 // are met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the
14 // distribution.
15 //
16 // - Neither the name of Sun Microsystems or the names of contributors may
17 // be used to endorse or promote products derived from this software without
18 // specific prior written permission.
19 //
20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31 // OF THE POSSIBILITY OF SUCH DAMAGE.
32 
33 // The original source code covered by the above license above has been
34 // modified significantly by Google Inc.
35 // Copyright 2012 the V8 project authors. All rights reserved.
36 
37 #include "src/v8.h"
38 
39 #if V8_TARGET_ARCH_ARM
40 
42 #include "src/base/bits.h"
43 #include "src/base/cpu.h"
44 #include "src/macro-assembler.h"
45 #include "src/serialize.h"
46 
47 namespace v8 {
48 namespace internal {
49 
50 // Get the CPU features enabled by the build. For cross compilation the
51 // preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS
52 // can be defined to enable ARMv7 and VFPv3 instructions when building the
53 // snapshot.
54 static unsigned CpuFeaturesImpliedByCompiler() {
55  unsigned answer = 0;
56 #ifdef CAN_USE_ARMV7_INSTRUCTIONS
57  if (FLAG_enable_armv7) answer |= 1u << ARMv7;
58 #endif // CAN_USE_ARMV7_INSTRUCTIONS
59 #ifdef CAN_USE_VFP3_INSTRUCTIONS
60  if (FLAG_enable_vfp3) answer |= 1u << VFP3 | 1u << ARMv7;
61 #endif // CAN_USE_VFP3_INSTRUCTIONS
62 #ifdef CAN_USE_VFP32DREGS
63  if (FLAG_enable_32dregs) answer |= 1u << VFP32DREGS;
64 #endif // CAN_USE_VFP32DREGS
65 #ifdef CAN_USE_NEON
66  if (FLAG_enable_neon) answer |= 1u << NEON;
67 #endif // CAN_USE_VFP32DREGS
68  if ((answer & (1u << ARMv7)) && FLAG_enable_unaligned_accesses) {
69  answer |= 1u << UNALIGNED_ACCESSES;
70  }
71 
72  return answer;
73 }
74 
75 
76 void CpuFeatures::ProbeImpl(bool cross_compile) {
77  supported_ |= CpuFeaturesImpliedByCompiler();
78  cache_line_size_ = 64;
79 
80  // Only use statically determined features for cross compile (snapshot).
81  if (cross_compile) return;
82 
83 #ifndef __arm__
84  // For the simulator build, use whatever the flags specify.
85  if (FLAG_enable_armv7) {
86  supported_ |= 1u << ARMv7;
87  if (FLAG_enable_vfp3) supported_ |= 1u << VFP3;
88  if (FLAG_enable_neon) supported_ |= 1u << NEON | 1u << VFP32DREGS;
89  if (FLAG_enable_sudiv) supported_ |= 1u << SUDIV;
90  if (FLAG_enable_movw_movt) supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
91  if (FLAG_enable_32dregs) supported_ |= 1u << VFP32DREGS;
92  }
93  if (FLAG_enable_mls) supported_ |= 1u << MLS;
94  if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;
95 
96 #else // __arm__
97  // Probe for additional features at runtime.
98  base::CPU cpu;
99  if (FLAG_enable_vfp3 && cpu.has_vfp3()) {
100  // This implementation also sets the VFP flags if runtime
101  // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
102  // 0406B, page A1-6.
103  supported_ |= 1u << VFP3 | 1u << ARMv7;
104  }
105 
106  if (FLAG_enable_neon && cpu.has_neon()) supported_ |= 1u << NEON;
107  if (FLAG_enable_sudiv && cpu.has_idiva()) supported_ |= 1u << SUDIV;
108  if (FLAG_enable_mls && cpu.has_thumb2()) supported_ |= 1u << MLS;
109 
110  if (cpu.architecture() >= 7) {
111  if (FLAG_enable_armv7) supported_ |= 1u << ARMv7;
112  if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;
113  // Use movw/movt for QUALCOMM ARMv7 cores.
114  if (FLAG_enable_movw_movt && cpu.implementer() == base::CPU::QUALCOMM) {
116  }
117  }
118 
119  // ARM Cortex-A9 and Cortex-A5 have 32 byte cachelines.
120  if (cpu.implementer() == base::CPU::ARM &&
121  (cpu.part() == base::CPU::ARM_CORTEX_A5 ||
122  cpu.part() == base::CPU::ARM_CORTEX_A9)) {
123  cache_line_size_ = 32;
124  }
125 
126  if (FLAG_enable_32dregs && cpu.has_vfp3_d32()) supported_ |= 1u << VFP32DREGS;
127 #endif
128 
130 }
131 
132 
134  const char* arm_arch = NULL;
135  const char* arm_target_type = "";
136  const char* arm_no_probe = "";
137  const char* arm_fpu = "";
138  const char* arm_thumb = "";
139  const char* arm_float_abi = NULL;
140 
141 #if !defined __arm__
142  arm_target_type = " simulator";
143 #endif
144 
145 #if defined ARM_TEST_NO_FEATURE_PROBE
146  arm_no_probe = " noprobe";
147 #endif
148 
149 #if defined CAN_USE_ARMV7_INSTRUCTIONS
150  arm_arch = "arm v7";
151 #else
152  arm_arch = "arm v6";
153 #endif
154 
155 #if defined CAN_USE_NEON
156  arm_fpu = " neon";
157 #elif defined CAN_USE_VFP3_INSTRUCTIONS
158 # if defined CAN_USE_VFP32DREGS
159  arm_fpu = " vfp3";
160 # else
161  arm_fpu = " vfp3-d16";
162 # endif
163 #else
164  arm_fpu = " vfp2";
165 #endif
166 
167 #ifdef __arm__
168  arm_float_abi = base::OS::ArmUsingHardFloat() ? "hard" : "softfp";
169 #elif USE_EABI_HARDFLOAT
170  arm_float_abi = "hard";
171 #else
172  arm_float_abi = "softfp";
173 #endif
174 
175 #if defined __arm__ && (defined __thumb__) || (defined __thumb2__)
176  arm_thumb = " thumb";
177 #endif
178 
179  printf("target%s%s %s%s%s %s\n",
180  arm_target_type, arm_no_probe, arm_arch, arm_fpu, arm_thumb,
181  arm_float_abi);
182 }
183 
184 
186  printf(
187  "ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d UNALIGNED_ACCESSES=%d "
188  "MOVW_MOVT_IMMEDIATE_LOADS=%d",
196 #ifdef __arm__
197  bool eabi_hardfloat = base::OS::ArmUsingHardFloat();
198 #elif USE_EABI_HARDFLOAT
199  bool eabi_hardfloat = true;
200 #else
201  bool eabi_hardfloat = false;
202 #endif
203  printf(" USE_EABI_HARDFLOAT=%d\n", eabi_hardfloat);
204 }
205 
206 
207 // -----------------------------------------------------------------------------
208 // Implementation of DwVfpRegister
209 
210 const char* DwVfpRegister::AllocationIndexToString(int index) {
211  DCHECK(index >= 0 && index < NumAllocatableRegisters());
212  DCHECK(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
214  if (index >= kDoubleRegZero.code()) index += kNumReservedRegisters;
215  return VFPRegisters::Name(index, true);
216 }
217 
218 
219 // -----------------------------------------------------------------------------
220 // Implementation of RelocInfo
221 
222 const int RelocInfo::kApplyMask = 0;
223 
224 
226  // The deserializer needs to know whether a pointer is specially coded.  Being
227  // specially coded on ARM means that it is a movw/movt instruction, or is an
228  // out of line constant pool entry.  These only occur if
229  // FLAG_enable_ool_constant_pool is true.
230  return FLAG_enable_ool_constant_pool;
231 }
232 
233 
235  return Assembler::is_constant_pool_load(pc_);
236 }
237 
238 
239 void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
240  // Patch the code at the current address with the supplied instructions.
241  Instr* pc = reinterpret_cast<Instr*>(pc_);
242  Instr* instr = reinterpret_cast<Instr*>(instructions);
243  for (int i = 0; i < instruction_count; i++) {
244  *(pc + i) = *(instr + i);
245  }
246 
247  // Indicate that code has changed.
248  CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
249 }
250 
251 
252 // Patch the code at the current PC with a call to the target address.
253 // Additional guard instructions can be added if required.
254 void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
255  // Patch the code at the current address with a call to the target.
256  UNIMPLEMENTED();
257 }
258 
259 
260 // -----------------------------------------------------------------------------
261 // Implementation of Operand and MemOperand
262 // See assembler-arm-inl.h for inlined constructors
263 
264 Operand::Operand(Handle<Object> handle) {
265  AllowDeferredHandleDereference using_raw_address;
266  rm_ = no_reg;
267  // Verify all Objects referred by code are NOT in new space.
268  Object* obj = *handle;
269  if (obj->IsHeapObject()) {
270  DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
271  imm32_ = reinterpret_cast<intptr_t>(handle.location());
273  } else {
274  // no relocation needed
275  imm32_ = reinterpret_cast<intptr_t>(obj);
276  rmode_ = RelocInfo::NONE32;
277  }
278 }
279 
280 
281 Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
282  DCHECK(is_uint5(shift_imm));
283 
284  rm_ = rm;
285  rs_ = no_reg;
286  shift_op_ = shift_op;
287  shift_imm_ = shift_imm & 31;
288 
289  if ((shift_op == ROR) && (shift_imm == 0)) {
290  // ROR #0 is functionally equivalent to LSL #0 and this allow us to encode
291  // RRX as ROR #0 (See below).
292  shift_op = LSL;
293  } else if (shift_op == RRX) {
294  // encoded as ROR with shift_imm == 0
295  DCHECK(shift_imm == 0);
296  shift_op_ = ROR;
297  shift_imm_ = 0;
298  }
299 }
300 
301 
302 Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
303  DCHECK(shift_op != RRX);
304  rm_ = rm;
305  rs_ = no_reg;
306  shift_op_ = shift_op;
307  rs_ = rs;
308 }
309 
310 
311 MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
312  rn_ = rn;
313  rm_ = no_reg;
314  offset_ = offset;
315  am_ = am;
316 }
317 
318 
319 MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
320  rn_ = rn;
321  rm_ = rm;
322  shift_op_ = LSL;
323  shift_imm_ = 0;
324  am_ = am;
325 }
326 
327 
328 MemOperand::MemOperand(Register rn, Register rm,
329  ShiftOp shift_op, int shift_imm, AddrMode am) {
330  DCHECK(is_uint5(shift_imm));
331  rn_ = rn;
332  rm_ = rm;
333  shift_op_ = shift_op;
334  shift_imm_ = shift_imm & 31;
335  am_ = am;
336 }
337 
338 
339 NeonMemOperand::NeonMemOperand(Register rn, AddrMode am, int align) {
340  DCHECK((am == Offset) || (am == PostIndex));
341  rn_ = rn;
342  rm_ = (am == Offset) ? pc : sp;
343  SetAlignment(align);
344 }
345 
346 
347 NeonMemOperand::NeonMemOperand(Register rn, Register rm, int align) {
348  rn_ = rn;
349  rm_ = rm;
350  SetAlignment(align);
351 }
352 
353 
354 void NeonMemOperand::SetAlignment(int align) {
355  switch (align) {
356  case 0:
357  align_ = 0;
358  break;
359  case 64:
360  align_ = 1;
361  break;
362  case 128:
363  align_ = 2;
364  break;
365  case 256:
366  align_ = 3;
367  break;
368  default:
369  UNREACHABLE();
370  align_ = 0;
371  break;
372  }
373 }
374 
375 
376 NeonListOperand::NeonListOperand(DoubleRegister base, int registers_count) {
377  base_ = base;
378  switch (registers_count) {
379  case 1:
380  type_ = nlt_1;
381  break;
382  case 2:
383  type_ = nlt_2;
384  break;
385  case 3:
386  type_ = nlt_3;
387  break;
388  case 4:
389  type_ = nlt_4;
390  break;
391  default:
392  UNREACHABLE();
393  type_ = nlt_1;
394  break;
395  }
396 }
397 
398 
399 // -----------------------------------------------------------------------------
400 // Specific instructions, constants, and masks.
401 
402 // str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
403 // register r is not encoded.
404 const Instr kPushRegPattern =
405  al | B26 | 4 | NegPreIndex | kRegister_sp_Code * B16;
406 // ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
407 // register r is not encoded.
408 const Instr kPopRegPattern =
409  al | B26 | L | 4 | PostIndex | kRegister_sp_Code * B16;
410 // ldr rd, [pc, #offset]
411 const Instr kLdrPCImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
412 const Instr kLdrPCImmedPattern = 5 * B24 | L | kRegister_pc_Code * B16;
413 // ldr rd, [pp, #offset]
414 const Instr kLdrPpImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
415 const Instr kLdrPpImmedPattern = 5 * B24 | L | kRegister_r8_Code * B16;
416 // ldr rd, [pp, rn]
417 const Instr kLdrPpRegMask = 15 * B24 | 7 * B20 | 15 * B16;
418 const Instr kLdrPpRegPattern = 7 * B24 | L | kRegister_r8_Code * B16;
419 // vldr dd, [pc, #offset]
420 const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
421 const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8;
422 // vldr dd, [pp, #offset]
423 const Instr kVldrDPpMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
424 const Instr kVldrDPpPattern = 13 * B24 | L | kRegister_r8_Code * B16 | 11 * B8;
425 // blxcc rm
426 const Instr kBlxRegMask =
427  15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
428 const Instr kBlxRegPattern =
429  B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
430 const Instr kBlxIp = al | kBlxRegPattern | ip.code();
431 const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
432 const Instr kMovMvnPattern = 0xd * B21;
433 const Instr kMovMvnFlip = B22;
434 const Instr kMovLeaveCCMask = 0xdff * B16;
435 const Instr kMovLeaveCCPattern = 0x1a0 * B16;
436 const Instr kMovwPattern = 0x30 * B20;
437 const Instr kMovtPattern = 0x34 * B20;
438 const Instr kMovwLeaveCCFlip = 0x5 * B21;
439 const Instr kMovImmedMask = 0x7f * B21;
440 const Instr kMovImmedPattern = 0x1d * B21;
441 const Instr kOrrImmedMask = 0x7f * B21;
442 const Instr kOrrImmedPattern = 0x1c * B21;
443 const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
444 const Instr kCmpCmnPattern = 0x15 * B20;
445 const Instr kCmpCmnFlip = B21;
446 const Instr kAddSubFlip = 0x6 * B21;
447 const Instr kAndBicFlip = 0xe * B21;
448 
449 // A mask for the Rd register for push, pop, ldr, str instructions.
450 const Instr kLdrRegFpOffsetPattern =
451  al | B26 | L | Offset | kRegister_fp_Code * B16;
452 const Instr kStrRegFpOffsetPattern =
454 const Instr kLdrRegFpNegOffsetPattern =
456 const Instr kStrRegFpNegOffsetPattern =
458 const Instr kLdrStrInstrTypeMask = 0xffff0000;
459 
460 
461 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
462  : AssemblerBase(isolate, buffer, buffer_size),
463  recorded_ast_id_(TypeFeedbackId::None()),
464  constant_pool_builder_(),
465  positions_recorder_(this) {
466  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
467  num_pending_32_bit_reloc_info_ = 0;
468  num_pending_64_bit_reloc_info_ = 0;
469  next_buffer_check_ = 0;
470  const_pool_blocked_nesting_ = 0;
471  no_const_pool_before_ = 0;
472  first_const_pool_32_use_ = -1;
473  first_const_pool_64_use_ = -1;
474  last_bound_pos_ = 0;
475  constant_pool_available_ = !FLAG_enable_ool_constant_pool;
476  ClearRecordedAstId();
477 }
478 
479 
480 Assembler::~Assembler() {
481  DCHECK(const_pool_blocked_nesting_ == 0);
482 }
483 
484 
485 void Assembler::GetCode(CodeDesc* desc) {
486  if (!FLAG_enable_ool_constant_pool) {
487  // Emit constant pool if necessary.
488  CheckConstPool(true, false);
489  DCHECK(num_pending_32_bit_reloc_info_ == 0);
490  DCHECK(num_pending_64_bit_reloc_info_ == 0);
491  }
492  // Set up code descriptor.
493  desc->buffer = buffer_;
494  desc->buffer_size = buffer_size_;
495  desc->instr_size = pc_offset();
496  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
497  desc->origin = this;
498 }
499 
500 
501 void Assembler::Align(int m) {
502  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
503  while ((pc_offset() & (m - 1)) != 0) {
504  nop();
505  }
506 }
507 
508 
509 void Assembler::CodeTargetAlign() {
510  // Preferred alignment of jump targets on some ARM chips.
511  Align(8);
512 }
513 
514 
515 Condition Assembler::GetCondition(Instr instr) {
516  return Instruction::ConditionField(instr);
517 }
518 
519 
520 bool Assembler::IsBranch(Instr instr) {
521  return (instr & (B27 | B25)) == (B27 | B25);
522 }
523 
524 
525 int Assembler::GetBranchOffset(Instr instr) {
526  DCHECK(IsBranch(instr));
527  // Take the jump offset in the lower 24 bits, sign extend it and multiply it
528  // with 4 to get the offset in bytes.
529  return ((instr & kImm24Mask) << 8) >> 6;
530 }
531 
532 
533 bool Assembler::IsLdrRegisterImmediate(Instr instr) {
534  return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
535 }
536 
537 
538 bool Assembler::IsVldrDRegisterImmediate(Instr instr) {
539  return (instr & (15 * B24 | 3 * B20 | 15 * B8)) == (13 * B24 | B20 | 11 * B8);
540 }
541 
542 
543 int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
544  DCHECK(IsLdrRegisterImmediate(instr));
545  bool positive = (instr & B23) == B23;
546  int offset = instr & kOff12Mask; // Zero extended offset.
547  return positive ? offset : -offset;
548 }
549 
550 
551 int Assembler::GetVldrDRegisterImmediateOffset(Instr instr) {
552  DCHECK(IsVldrDRegisterImmediate(instr));
553  bool positive = (instr & B23) == B23;
554  int offset = instr & kOff8Mask; // Zero extended offset.
555  offset <<= 2;
556  return positive ? offset : -offset;
557 }
558 
559 
560 Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
561  DCHECK(IsLdrRegisterImmediate(instr));
562  bool positive = offset >= 0;
563  if (!positive) offset = -offset;
564  DCHECK(is_uint12(offset));
565  // Set bit indicating whether the offset should be added.
566  instr = (instr & ~B23) | (positive ? B23 : 0);
567  // Set the actual offset.
568  return (instr & ~kOff12Mask) | offset;
569 }
570 
571 
572 Instr Assembler::SetVldrDRegisterImmediateOffset(Instr instr, int offset) {
573  DCHECK(IsVldrDRegisterImmediate(instr));
574  DCHECK((offset & ~3) == offset); // Must be 64-bit aligned.
575  bool positive = offset >= 0;
576  if (!positive) offset = -offset;
577  DCHECK(is_uint10(offset));
578  // Set bit indicating whether the offset should be added.
579  instr = (instr & ~B23) | (positive ? B23 : 0);
580  // Set the actual offset. Its bottom 2 bits are zero.
581  return (instr & ~kOff8Mask) | (offset >> 2);
582 }
583 
584 
585 bool Assembler::IsStrRegisterImmediate(Instr instr) {
586  return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
587 }
588 
589 
590 Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
591  DCHECK(IsStrRegisterImmediate(instr));
592  bool positive = offset >= 0;
593  if (!positive) offset = -offset;
594  DCHECK(is_uint12(offset));
595  // Set bit indicating whether the offset should be added.
596  instr = (instr & ~B23) | (positive ? B23 : 0);
597  // Set the actual offset.
598  return (instr & ~kOff12Mask) | offset;
599 }
600 
601 
602 bool Assembler::IsAddRegisterImmediate(Instr instr) {
603  return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23);
604 }
605 
606 
607 Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
608  DCHECK(IsAddRegisterImmediate(instr));
609  DCHECK(offset >= 0);
610  DCHECK(is_uint12(offset));
611  // Set the offset.
612  return (instr & ~kOff12Mask) | offset;
613 }
614 
615 
616 Register Assembler::GetRd(Instr instr) {
617  Register reg;
618  reg.code_ = Instruction::RdValue(instr);
619  return reg;
620 }
621 
622 
623 Register Assembler::GetRn(Instr instr) {
624  Register reg;
625  reg.code_ = Instruction::RnValue(instr);
626  return reg;
627 }
628 
629 
630 Register Assembler::GetRm(Instr instr) {
631  Register reg;
632  reg.code_ = Instruction::RmValue(instr);
633  return reg;
634 }
635 
636 
637 Instr Assembler::GetConsantPoolLoadPattern() {
638  if (FLAG_enable_ool_constant_pool) {
639  return kLdrPpImmedPattern;
640  } else {
641  return kLdrPCImmedPattern;
642  }
643 }
644 
645 
646 Instr Assembler::GetConsantPoolLoadMask() {
647  if (FLAG_enable_ool_constant_pool) {
648  return kLdrPpImmedMask;
649  } else {
650  return kLdrPCImmedMask;
651  }
652 }
653 
654 
655 bool Assembler::IsPush(Instr instr) {
656  return ((instr & ~kRdMask) == kPushRegPattern);
657 }
658 
659 
660 bool Assembler::IsPop(Instr instr) {
661  return ((instr & ~kRdMask) == kPopRegPattern);
662 }
663 
664 
665 bool Assembler::IsStrRegFpOffset(Instr instr) {
666  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);
667 }
668 
669 
670 bool Assembler::IsLdrRegFpOffset(Instr instr) {
671  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);
672 }
673 
674 
675 bool Assembler::IsStrRegFpNegOffset(Instr instr) {
676  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);
677 }
678 
679 
680 bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
681  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
682 }
683 
684 
685 bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
686  // Check the instruction is indeed a
687  // ldr<cond> <Rd>, [pc +/- offset_12].
688  return (instr & kLdrPCImmedMask) == kLdrPCImmedPattern;
689 }
690 
691 
692 bool Assembler::IsLdrPpImmediateOffset(Instr instr) {
693  // Check the instruction is indeed a
694  // ldr<cond> <Rd>, [pp +/- offset_12].
695  return (instr & kLdrPpImmedMask) == kLdrPpImmedPattern;
696 }
697 
698 
699 bool Assembler::IsLdrPpRegOffset(Instr instr) {
700  // Check the instruction is indeed a
701  // ldr<cond> <Rd>, [pp, +/- <Rm>].
702  return (instr & kLdrPpRegMask) == kLdrPpRegPattern;
703 }
704 
705 
706 Instr Assembler::GetLdrPpRegOffsetPattern() { return kLdrPpRegPattern; }
707 
708 
709 bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
710  // Check the instruction is indeed a
711  // vldr<cond> <Dd>, [pc +/- offset_10].
712  return (instr & kVldrDPCMask) == kVldrDPCPattern;
713 }
714 
715 
716 bool Assembler::IsVldrDPpImmediateOffset(Instr instr) {
717  // Check the instruction is indeed a
718  // vldr<cond> <Dd>, [pp +/- offset_10].
719  return (instr & kVldrDPpMask) == kVldrDPpPattern;
720 }
721 
722 
723 bool Assembler::IsBlxReg(Instr instr) {
724  // Check the instruction is indeed a
725  // blxcc <Rm>
726  return (instr & kBlxRegMask) == kBlxRegPattern;
727 }
728 
729 
730 bool Assembler::IsBlxIp(Instr instr) {
731  // Check the instruction is indeed a
732  // blx ip
733  return instr == kBlxIp;
734 }
735 
736 
737 bool Assembler::IsTstImmediate(Instr instr) {
738  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
739  (I | TST | S);
740 }
741 
742 
743 bool Assembler::IsCmpRegister(Instr instr) {
744  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) ==
745  (CMP | S);
746 }
747 
748 
749 bool Assembler::IsCmpImmediate(Instr instr) {
750  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
751  (I | CMP | S);
752 }
753 
754 
755 Register Assembler::GetCmpImmediateRegister(Instr instr) {
756  DCHECK(IsCmpImmediate(instr));
757  return GetRn(instr);
758 }
759 
760 
761 int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
762  DCHECK(IsCmpImmediate(instr));
763  return instr & kOff12Mask;
764 }
765 
766 
767 // Labels refer to positions in the (to be) generated code.
768 // There are bound, linked, and unused labels.
769 //
770 // Bound labels refer to known positions in the already
771 // generated code. pos() is the position the label refers to.
772 //
773 // Linked labels refer to unknown positions in the code
774 // to be generated; pos() is the position of the last
775 // instruction using the label.
776 //
777 // The linked labels form a link chain by making the branch offset
778 // in the instruction steam to point to the previous branch
779 // instruction using the same label.
780 //
781 // The link chain is terminated by a branch offset pointing to the
782 // same position.
783 
784 
785 int Assembler::target_at(int pos) {
786  Instr instr = instr_at(pos);
787  if (is_uint24(instr)) {
788  // Emitted link to a label, not part of a branch.
789  return instr;
790  }
791  DCHECK((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
792  int imm26 = ((instr & kImm24Mask) << 8) >> 6;
793  if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
794  ((instr & B24) != 0)) {
795  // blx uses bit 24 to encode bit 2 of imm26
796  imm26 += 2;
797  }
798  return pos + kPcLoadDelta + imm26;
799 }
800 
801 
802 void Assembler::target_at_put(int pos, int target_pos) {
803  Instr instr = instr_at(pos);
804  if (is_uint24(instr)) {
805  DCHECK(target_pos == pos || target_pos >= 0);
806  // Emitted link to a label, not part of a branch.
807  // Load the position of the label relative to the generated code object
808  // pointer in a register.
809 
810  // Here are the instructions we need to emit:
811  // For ARMv7: target24 => target16_1:target16_0
812  // movw dst, #target16_0
813  // movt dst, #target16_1
814  // For ARMv6: target24 => target8_2:target8_1:target8_0
815  // mov dst, #target8_0
816  // orr dst, dst, #target8_1 << 8
817  // orr dst, dst, #target8_2 << 16
818 
819  // We extract the destination register from the emitted nop instruction.
820  Register dst = Register::from_code(
821  Instruction::RmValue(instr_at(pos + kInstrSize)));
822  DCHECK(IsNop(instr_at(pos + kInstrSize), dst.code()));
823  uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
824  DCHECK(is_uint24(target24));
825  if (is_uint8(target24)) {
826  // If the target fits in a byte then only patch with a mov
827  // instruction.
828  CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
829  1,
830  CodePatcher::DONT_FLUSH);
831  patcher.masm()->mov(dst, Operand(target24));
832  } else {
833  uint16_t target16_0 = target24 & kImm16Mask;
834  uint16_t target16_1 = target24 >> 16;
835  if (CpuFeatures::IsSupported(ARMv7)) {
836  // Patch with movw/movt.
837  if (target16_1 == 0) {
838  CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
839  1,
840  CodePatcher::DONT_FLUSH);
841  patcher.masm()->movw(dst, target16_0);
842  } else {
843  CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
844  2,
845  CodePatcher::DONT_FLUSH);
846  patcher.masm()->movw(dst, target16_0);
847  patcher.masm()->movt(dst, target16_1);
848  }
849  } else {
850  // Patch with a sequence of mov/orr/orr instructions.
851  uint8_t target8_0 = target16_0 & kImm8Mask;
852  uint8_t target8_1 = target16_0 >> 8;
853  uint8_t target8_2 = target16_1 & kImm8Mask;
854  if (target8_2 == 0) {
855  CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
856  2,
857  CodePatcher::DONT_FLUSH);
858  patcher.masm()->mov(dst, Operand(target8_0));
859  patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
860  } else {
861  CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
862  3,
863  CodePatcher::DONT_FLUSH);
864  patcher.masm()->mov(dst, Operand(target8_0));
865  patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
866  patcher.masm()->orr(dst, dst, Operand(target8_2 << 16));
867  }
868  }
869  }
870  return;
871  }
872  int imm26 = target_pos - (pos + kPcLoadDelta);
873  DCHECK((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
874  if (Instruction::ConditionField(instr) == kSpecialCondition) {
875  // blx uses bit 24 to encode bit 2 of imm26
876  DCHECK((imm26 & 1) == 0);
877  instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1)*B24;
878  } else {
879  DCHECK((imm26 & 3) == 0);
880  instr &= ~kImm24Mask;
881  }
882  int imm24 = imm26 >> 2;
883  DCHECK(is_int24(imm24));
884  instr_at_put(pos, instr | (imm24 & kImm24Mask));
885 }
886 
887 
888 void Assembler::print(Label* L) {
889  if (L->is_unused()) {
890  PrintF("unused label\n");
891  } else if (L->is_bound()) {
892  PrintF("bound label to %d\n", L->pos());
893  } else if (L->is_linked()) {
894  Label l = *L;
895  PrintF("unbound label");
896  while (l.is_linked()) {
897  PrintF("@ %d ", l.pos());
898  Instr instr = instr_at(l.pos());
899  if ((instr & ~kImm24Mask) == 0) {
900  PrintF("value\n");
901  } else {
902  DCHECK((instr & 7*B25) == 5*B25); // b, bl, or blx
903  Condition cond = Instruction::ConditionField(instr);
904  const char* b;
905  const char* c;
906  if (cond == kSpecialCondition) {
907  b = "blx";
908  c = "";
909  } else {
910  if ((instr & B24) != 0)
911  b = "bl";
912  else
913  b = "b";
914 
915  switch (cond) {
916  case eq: c = "eq"; break;
917  case ne: c = "ne"; break;
918  case hs: c = "hs"; break;
919  case lo: c = "lo"; break;
920  case mi: c = "mi"; break;
921  case pl: c = "pl"; break;
922  case vs: c = "vs"; break;
923  case vc: c = "vc"; break;
924  case hi: c = "hi"; break;
925  case ls: c = "ls"; break;
926  case ge: c = "ge"; break;
927  case lt: c = "lt"; break;
928  case gt: c = "gt"; break;
929  case le: c = "le"; break;
930  case al: c = ""; break;
931  default:
932  c = "";
933  UNREACHABLE();
934  }
935  }
936  PrintF("%s%s\n", b, c);
937  }
938  next(&l);
939  }
940  } else {
941  PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
942  }
943 }
944 
945 
946 void Assembler::bind_to(Label* L, int pos) {
947  DCHECK(0 <= pos && pos <= pc_offset()); // must have a valid binding position
948  while (L->is_linked()) {
949  int fixup_pos = L->pos();
950  next(L); // call next before overwriting link with target at fixup_pos
951  target_at_put(fixup_pos, pos);
952  }
953  L->bind_to(pos);
954 
955  // Keep track of the last bound label so we don't eliminate any instructions
956  // before a bound label.
957  if (pos > last_bound_pos_)
958  last_bound_pos_ = pos;
959 }
960 
961 
962 void Assembler::bind(Label* L) {
963  DCHECK(!L->is_bound()); // label can only be bound once
964  bind_to(L, pc_offset());
965 }
966 
967 
968 void Assembler::next(Label* L) {
969  DCHECK(L->is_linked());
970  int link = target_at(L->pos());
971  if (link == L->pos()) {
972  // Branch target points to the same instuction. This is the end of the link
973  // chain.
974  L->Unuse();
975  } else {
976  DCHECK(link >= 0);
977  L->link_to(link);
978  }
979 }
980 
981 
982 // Low-level code emission routines depending on the addressing mode.
983 // If this returns true then you have to use the rotate_imm and immed_8
984 // that it returns, because it may have already changed the instruction
985 // to match them!
986 static bool fits_shifter(uint32_t imm32,
987  uint32_t* rotate_imm,
988  uint32_t* immed_8,
989  Instr* instr) {
990  // imm32 must be unsigned.
991  for (int rot = 0; rot < 16; rot++) {
992  uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
993  if ((imm8 <= 0xff)) {
994  *rotate_imm = rot;
995  *immed_8 = imm8;
996  return true;
997  }
998  }
999  // If the opcode is one with a complementary version and the complementary
1000  // immediate fits, change the opcode.
1001  if (instr != NULL) {
1002  if ((*instr & kMovMvnMask) == kMovMvnPattern) {
1003  if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
1004  *instr ^= kMovMvnFlip;
1005  return true;
1006  } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
1007  if (CpuFeatures::IsSupported(ARMv7)) {
1008  if (imm32 < 0x10000) {
1009  *instr ^= kMovwLeaveCCFlip;
1010  *instr |= Assembler::EncodeMovwImmediate(imm32);
1011  *rotate_imm = *immed_8 = 0; // Not used for movw.
1012  return true;
1013  }
1014  }
1015  }
1016  } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
1017  if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
1018  *instr ^= kCmpCmnFlip;
1019  return true;
1020  }
1021  } else {
1022  Instr alu_insn = (*instr & kALUMask);
1023  if (alu_insn == ADD ||
1024  alu_insn == SUB) {
1025  if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
1026  *instr ^= kAddSubFlip;
1027  return true;
1028  }
1029  } else if (alu_insn == AND ||
1030  alu_insn == BIC) {
1031  if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
1032  *instr ^= kAndBicFlip;
1033  return true;
1034  }
1035  }
1036  }
1037  }
1038  return false;
1039 }
1040 
1041 
1042 // We have to use the temporary register for things that can be relocated even
1043 // if they can be encoded in the ARM's 12 bits of immediate-offset instruction
1044 // space. There is no guarantee that the relocated location can be similarly
1045 // encoded.
1046 bool Operand::must_output_reloc_info(const Assembler* assembler) const {
1047  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
1048  if (assembler != NULL && assembler->predictable_code_size()) return true;
1049  return assembler->serializer_enabled();
1050  } else if (RelocInfo::IsNone(rmode_)) {
1051  return false;
1052  }
1053  return true;
1054 }
1055 
1056 
1057 static bool use_mov_immediate_load(const Operand& x,
1058  const Assembler* assembler) {
1059  if (assembler != NULL && !assembler->is_constant_pool_available()) {
1060  return true;
1061  } else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
1062  (assembler == NULL || !assembler->predictable_code_size())) {
1063  // Prefer movw / movt to constant pool if it is more efficient on the CPU.
1064  return true;
1065  } else if (x.must_output_reloc_info(assembler)) {
1066  // Prefer constant pool if data is likely to be patched.
1067  return false;
1068  } else {
1069  // Otherwise, use immediate load if movw / movt is available.
1070  return CpuFeatures::IsSupported(ARMv7);
1071  }
1072 }
1073 
1074 
// Returns how many instructions are needed to use this operand with |instr|,
// including any movw/movt sequence or constant pool load required to
// materialize an unencodable immediate.
int Operand::instructions_required(const Assembler* assembler,
                                   Instr instr) const {
  // A register operand always encodes directly into the instruction.
  if (rm_.is_valid()) return 1;
  uint32_t dummy1, dummy2;
  if (must_output_reloc_info(assembler) ||
      !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
    // The immediate operand cannot be encoded as a shifter operand, or use of
    // constant pool is required. First account for the instructions required
    // for the constant pool or immediate load
    int instructions;
    if (use_mov_immediate_load(*this, assembler)) {
      // A movw / movt or mov / orr immediate load.
      instructions = CpuFeatures::IsSupported(ARMv7) ? 2 : 4;
    } else if (assembler != NULL && assembler->use_extended_constant_pool()) {
      // An extended constant pool load.
      instructions = CpuFeatures::IsSupported(ARMv7) ? 3 : 5;
    } else {
      // A small constant pool load.
      instructions = 1;
    }

    if ((instr & ~kCondMask) != 13 * B21) {  // mov, S not set
      // For a mov or mvn instruction which doesn't set the condition
      // code, the constant pool or immediate load is enough, otherwise we need
      // to account for the actual instruction being requested.
      instructions += 1;
    }
    return instructions;
  } else {
    // No use of constant pool and the immediate operand can be encoded as a
    // shifter operand.
    return 1;
  }
}
1109 
1110 
// Loads the 32-bit immediate in |x| into |rd|, either with a movw/movt pair
// (mov/orr chain pre-ARMv7) or through a constant pool entry, recording
// relocation info when required.
void Assembler::move_32_bit_immediate(Register rd,
                                      const Operand& x,
                                      Condition cond) {
  RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL);
  uint32_t imm32 = static_cast<uint32_t>(x.imm32_);
  if (x.must_output_reloc_info(this)) {
    RecordRelocInfo(rinfo);
  }

  if (use_mov_immediate_load(x, this)) {
    // Loads into pc go through ip so the final transfer is a single mov.
    Register target = rd.code() == pc.code() ? ip : rd;
    if (CpuFeatures::IsSupported(ARMv7)) {
      if (!FLAG_enable_ool_constant_pool && x.must_output_reloc_info(this)) {
        // Make sure the movw/movt doesn't get separated.
        BlockConstPoolFor(2);
      }
      movw(target, imm32 & 0xffff, cond);
      movt(target, imm32 >> 16, cond);
    } else {
      // Pre-ARMv7: assemble the value one byte at a time (mov + three orrs).
      DCHECK(FLAG_enable_ool_constant_pool);
      mov(target, Operand(imm32 & kImm8Mask), LeaveCC, cond);
      orr(target, target, Operand(imm32 & (kImm8Mask << 8)), LeaveCC, cond);
      orr(target, target, Operand(imm32 & (kImm8Mask << 16)), LeaveCC, cond);
      orr(target, target, Operand(imm32 & (kImm8Mask << 24)), LeaveCC, cond);
    }
    if (target.code() != rd.code()) {
      mov(rd, target, LeaveCC, cond);
    }
  } else {
    DCHECK(is_constant_pool_available());
    ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo);
    if (section == ConstantPoolArray::EXTENDED_SECTION) {
      DCHECK(FLAG_enable_ool_constant_pool);
      Register target = rd.code() == pc.code() ? ip : rd;
      // Emit instructions to load constant pool offset.
      // The zero immediates are placeholders — presumably patched with the
      // real offset once the pool layout is known (TODO: confirm at patch
      // site).
      if (CpuFeatures::IsSupported(ARMv7)) {
        movw(target, 0, cond);
        movt(target, 0, cond);
      } else {
        mov(target, Operand(0), LeaveCC, cond);
        orr(target, target, Operand(0), LeaveCC, cond);
        orr(target, target, Operand(0), LeaveCC, cond);
        orr(target, target, Operand(0), LeaveCC, cond);
      }
      // Load from constant pool at offset.
      ldr(rd, MemOperand(pp, target), cond);
    } else {
      DCHECK(section == ConstantPoolArray::SMALL_SECTION);
      // Zero-offset placeholder load from the pool base (pp when the
      // out-of-line pool is enabled, otherwise pc-relative).
      ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond);
    }
  }
}
1163 
1164 
// Emits a data-processing instruction (ARM addressing mode 1). Encodes the
// operand as an immediate, immediate-shifted register, or register-shifted
// register; unencodable immediates fall back to a load through ip (or a
// direct 32-bit immediate move for a plain mov).
void Assembler::addrmod1(Instr instr,
                         Register rn,
                         Register rd,
                         const Operand& x) {
  CheckBuffer();
  DCHECK((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
  if (!x.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (x.must_output_reloc_info(this) ||
        !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
      // The immediate operand cannot be encoded as a shifter operand, so load
      // it first to register ip and change the original instruction to use ip.
      // However, if the original instruction is a 'mov rd, x' (not setting the
      // condition code), then replace it with a 'ldr rd, [pc]'.
      CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
      Condition cond = Instruction::ConditionField(instr);
      if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
        move_32_bit_immediate(rd, x, cond);
      } else {
        // Materialize the immediate in ip, then retry with ip as operand.
        mov(ip, x, LeaveCC, cond);
        addrmod1(instr, rn, rd, Operand(ip));
      }
      return;
    }
    instr |= I | rotate_imm*B8 | immed_8;
  } else if (!x.rs_.is_valid()) {
    // Immediate shift.
    instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  } else {
    // Register shift.
    DCHECK(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
    instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
  }
  emit(instr | rn.code()*B16 | rd.code()*B12);
  if (rn.is(pc) || x.rm_.is(pc)) {
    // Block constant pool emission for one instruction after reading pc.
    BlockConstPoolFor(1);
  }
}
1206 
1207 
// Emits a word/byte load-store instruction (ARM addressing mode 2) with a
// 12-bit immediate or (scaled) register offset. Too-large immediate offsets
// are loaded into ip first and the instruction is retried in register form.
void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
  DCHECK((instr & ~(kCondMask | B | L)) == B26);
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_12 = x.offset_;
    if (offset_12 < 0) {
      // Negative offsets use the subtract form: flip the U (up/down) bit.
      offset_12 = -offset_12;
      am ^= U;
    }
    if (!is_uint12(offset_12)) {
      // Immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed.
      DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
      addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    DCHECK(offset_12 >= 0);  // no masking needed
    instr |= offset_12;
  } else {
    // Register offset (shift_imm_ and shift_op_ are 0) or scaled
    // register offset the constructors make sure than both shift_imm_
    // and shift_op_ are initialized.
    DCHECK(!x.rm_.is(pc));
    instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  }
  DCHECK((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
1238 
1239 
// Emits a halfword/signed-byte/doubleword load-store instruction (ARM
// addressing mode 3). Immediate offsets are limited to 8 bits (split into
// two nibbles in the encoding); scaled register offsets are not supported
// by this mode and are computed into ip first.
void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
  DCHECK((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
  DCHECK(x.rn_.is_valid());
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_8 = x.offset_;
    if (offset_8 < 0) {
      // Negative offsets use the subtract form: flip the U (up/down) bit.
      offset_8 = -offset_8;
      am ^= U;
    }
    if (!is_uint8(offset_8)) {
      // Immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed.
      DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
      addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    DCHECK(offset_8 >= 0);  // no masking needed
    // The 8-bit offset is split: high nibble in bits 8-11, low in bits 0-3.
    instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
  } else if (x.shift_imm_ != 0) {
    // Scaled register offset not supported, load index first
    // rn (and rd in a load) should never be ip, or will be trashed.
    DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
    mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
        Instruction::ConditionField(instr));
    addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
    return;
  } else {
    // Register offset.
    DCHECK((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
    instr |= x.rm_.code();
  }
  DCHECK((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
1277 
1278 
1279 void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
1280  DCHECK((instr & ~(kCondMask | P | U | W | L)) == B27);
1281  DCHECK(rl != 0);
1282  DCHECK(!rn.is(pc));
1283  emit(instr | rn.code()*B16 | rl);
1284 }
1285 
1286 
// Emits a coprocessor load/store instruction (ARM addressing mode 5).
// Offsets are word-aligned and encoded as an unsigned 8-bit word count.
void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
  // Unindexed addressing is not encoded by this function.
  DCHECK_EQ((B27 | B26),
            (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
  DCHECK(x.rn_.is_valid() && !x.rm_.is_valid());
  int am = x.am_;
  int offset_8 = x.offset_;
  DCHECK((offset_8 & 3) == 0);  // offset must be an aligned word offset
  offset_8 >>= 2;               // encode as a word count, not a byte count
  if (offset_8 < 0) {
    // Negative offsets use the subtract form: flip the U (up/down) bit.
    offset_8 = -offset_8;
    am ^= U;
  }
  DCHECK(is_uint8(offset_8));  // unsigned word offset must fit in a byte
  DCHECK((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback

  // Post-indexed addressing requires W == 1; different from addrmod2/3.
  if ((am & P) == 0)
    am |= W;

  DCHECK(offset_8 >= 0);  // no masking needed
  emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
}
1310 
1311 
// Computes the branch offset to label L relative to the pc value read by
// the branch (pc_offset() + kPcLoadDelta), threading unbound labels onto
// their link chain. |jump_elimination_allowed| is currently unused here.
int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      // Point to previous instruction that uses the link.
      target_pos = L->pos();
    } else {
      // First entry of the link chain points to itself.
      target_pos = pc_offset();
    }
    L->link_to(pc_offset());
  }

  // Block the emission of the constant pool, since the branch instruction must
  // be emitted at the pc offset recorded by the label.
  BlockConstPoolFor(1);
  return target_pos - (pc_offset() + kPcLoadDelta);
}
1332 
1333 
1334 // Branch instructions.
1335 void Assembler::b(int branch_offset, Condition cond) {
1336  DCHECK((branch_offset & 3) == 0);
1337  int imm24 = branch_offset >> 2;
1338  DCHECK(is_int24(imm24));
1339  emit(cond | B27 | B25 | (imm24 & kImm24Mask));
1340 
1341  if (cond == al) {
1342  // Dead code is a good location to emit the constant pool.
1343  CheckConstPool(false, false);
1344  }
1345 }
1346 
1347 
1348 void Assembler::bl(int branch_offset, Condition cond) {
1349  positions_recorder()->WriteRecordedPositions();
1350  DCHECK((branch_offset & 3) == 0);
1351  int imm24 = branch_offset >> 2;
1352  DCHECK(is_int24(imm24));
1353  emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
1354 }
1355 
1356 
1357 void Assembler::blx(int branch_offset) { // v5 and above
1358  positions_recorder()->WriteRecordedPositions();
1359  DCHECK((branch_offset & 1) == 0);
1360  int h = ((branch_offset & 2) >> 1)*B24;
1361  int imm24 = branch_offset >> 2;
1362  DCHECK(is_int24(imm24));
1363  emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
1364 }
1365 
1366 
// Branch with link and exchange to the address in |target|.
void Assembler::blx(Register target, Condition cond) {  // v5 and above
  positions_recorder()->WriteRecordedPositions();
  DCHECK(!target.is(pc));
  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
}
1372 
1373 
// Branch and exchange to the address in |target|.
void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
  positions_recorder()->WriteRecordedPositions();
  DCHECK(!target.is(pc));  // use of pc is actually allowed, but discouraged
  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
}
1379 
1380 
1381 // Data-processing instructions.
1382 
// Bitwise AND: dst = src1 & src2 (trailing underscore avoids C++ 'and').
void Assembler::and_(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
  addrmod1(cond | AND | s, src1, dst, src2);
}
1387 
1388 
// Exclusive OR: dst = src1 ^ src2.
void Assembler::eor(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | EOR | s, src1, dst, src2);
}
1393 
1394 
// Subtract: dst = src1 - src2.
void Assembler::sub(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | SUB | s, src1, dst, src2);
}
1399 
1400 
// Reverse subtract: dst = src2 - src1.
void Assembler::rsb(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | RSB | s, src1, dst, src2);
}
1405 
1406 
// Add: dst = src1 + src2.
void Assembler::add(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | ADD | s, src1, dst, src2);
}
1411 
1412 
// Add with carry: dst = src1 + src2 + C.
void Assembler::adc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | ADC | s, src1, dst, src2);
}
1417 
1418 
// Subtract with carry: dst = src1 - src2 - (1 - C).
void Assembler::sbc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | SBC | s, src1, dst, src2);
}
1423 
1424 
// Reverse subtract with carry: dst = src2 - src1 - (1 - C).
void Assembler::rsc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | RSC | s, src1, dst, src2);
}
1429 
1430 
// Test bits: sets condition codes on src1 & src2; result is discarded
// (S is always set, rd field is encoded as r0).
void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | TST | S, src1, r0, src2);
}
1434 
1435 
// Test equivalence: sets condition codes on src1 ^ src2; result discarded.
void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | TEQ | S, src1, r0, src2);
}
1439 
1440 
// Compare: sets condition codes on src1 - src2; result discarded.
void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | CMP | S, src1, r0, src2);
}
1444 
1445 
// Compare against a pre-encoded 12-bit shifter immediate (rotate + 8-bit
// value), bypassing the fits_shifter logic used by addrmod1.
void Assembler::cmp_raw_immediate(
    Register src, int raw_immediate, Condition cond) {
  DCHECK(is_uint12(raw_immediate));
  emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
}
1451 
1452 
// Compare negative: sets condition codes on src1 + src2; result discarded.
void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | CMN | S, src1, r0, src2);
}
1456 
1457 
// Bitwise OR: dst = src1 | src2.
void Assembler::orr(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | ORR | s, src1, dst, src2);
}
1462 
1463 
// Move: dst = src. A mov into pc acts as a branch, so source positions are
// recorded for it.
void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
  if (dst.is(pc)) {
    positions_recorder()->WriteRecordedPositions();
  }
  // Don't allow nop instructions in the form mov rn, rn to be generated using
  // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
  // or MarkCode(int/NopMarkerTypes) pseudo instructions.
  DCHECK(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
  addrmod1(cond | MOV | s, r0, dst, src);
}
1474 
1475 
// Loads |dst| with the offset of |label| from the start of the generated
// code (plus the code header). For unbound labels a patchable placeholder
// sequence is emitted instead.
void Assembler::mov_label_offset(Register dst, Label* label) {
  if (label->is_bound()) {
    mov(dst, Operand(label->pos() + (Code::kHeaderSize - kHeapObjectTag)));
  } else {
    // Emit the link to the label in the code stream followed by extra nop
    // instructions.
    // If the label is not linked, then start a new link chain by linking it to
    // itself, emitting pc_offset().
    int link = label->is_linked() ? label->pos() : pc_offset();
    label->link_to(pc_offset());

    // When the label is bound, these instructions will be patched with a
    // sequence of movw/movt or mov/orr/orr instructions. They will load the
    // destination register with the position of the label from the beginning
    // of the code.
    //
    // The link will be extracted from the first instruction and the destination
    // register from the second.
    // For ARMv7:
    //   link
    //   mov dst, dst
    // For ARMv6:
    //   link
    //   mov dst, dst
    //   mov dst, dst
    //
    // When the label gets bound: target_at extracts the link and target_at_put
    // patches the instructions.
    DCHECK(is_uint24(link));
    // The placeholder sequence must stay contiguous; no pool in between.
    BlockConstPoolScope block_const_pool(this);
    emit(link);
    nop(dst.code());
    if (!CpuFeatures::IsSupported(ARMv7)) {
      // ARMv6 needs a third slot for the extra orr in the patched sequence.
      nop(dst.code());
    }
  }
}
1513 
1514 
// movw: loads a 16-bit immediate into |reg| (ARMv7 only; per the ARM ISA
// this also clears the upper halfword).
void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
  DCHECK(CpuFeatures::IsSupported(ARMv7));
  emit(cond | 0x30*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
}
1519 
1520 
// movt: loads a 16-bit immediate into the top halfword of |reg| (ARMv7 only).
void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
  DCHECK(CpuFeatures::IsSupported(ARMv7));
  emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
}
1525 
1526 
// Bit clear: dst = src1 & ~src2.
void Assembler::bic(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | BIC | s, src1, dst, src2);
}
1531 
1532 
// Move negated: dst = ~src.
void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
  addrmod1(cond | MVN | s, r0, dst, src);
}
1536 
1537 
1538 // Multiply instructions.
// Multiply-accumulate: dst = src1 * src2 + srcA.
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
                    SBit s, Condition cond) {
  DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
  emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1545 
1546 
// Multiply-subtract: dst = srcA - src1 * src2 (requires the MLS feature).
void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
                    Condition cond) {
  DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
  DCHECK(IsEnabled(MLS));
  emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1554 
1555 
// Signed divide: dst = src1 / src2 (requires the SUDIV feature).
void Assembler::sdiv(Register dst, Register src1, Register src2,
                     Condition cond) {
  DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
  DCHECK(IsEnabled(SUDIV));
  emit(cond | B26 | B25| B24 | B20 | dst.code()*B16 | 0xf * B12 |
       src2.code()*B8 | B4 | src1.code());
}
1563 
1564 
// Unsigned divide: dst = src1 / src2 (requires the SUDIV feature).
void Assembler::udiv(Register dst, Register src1, Register src2,
                     Condition cond) {
  DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
  DCHECK(IsEnabled(SUDIV));
  emit(cond | B26 | B25 | B24 | B21 | B20 | dst.code() * B16 | 0xf * B12 |
       src2.code() * B8 | B4 | src1.code());
}
1572 
1573 
// Multiply: dst = src1 * src2.
void Assembler::mul(Register dst, Register src1, Register src2,
                    SBit s, Condition cond) {
  DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
  // dst goes in bits 16-19 for this instruction!
  emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
}
1580 
1581 
// Signed multiply-accumulate long: dstH:dstL += src1 * src2.
void Assembler::smlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  DCHECK(!dstL.is(dstH));  // result registers must be distinct
  emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1593 
1594 
// Signed multiply long: dstH:dstL = src1 * src2.
void Assembler::smull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  DCHECK(!dstL.is(dstH));  // result registers must be distinct
  emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1606 
1607 
// Unsigned multiply-accumulate long: dstH:dstL += src1 * src2.
void Assembler::umlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  DCHECK(!dstL.is(dstH));  // result registers must be distinct
  emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1619 
1620 
// Unsigned multiply long: dstH:dstL = src1 * src2.
void Assembler::umull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  DCHECK(!dstL.is(dstH));  // result registers must be distinct
  emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1632 
1633 
1634 // Miscellaneous arithmetic instructions.
// Count leading zeros: dst = number of leading zero bits in src.
void Assembler::clz(Register dst, Register src, Condition cond) {
  // v5 and above.
  DCHECK(!dst.is(pc) && !src.is(pc));
  emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
       15*B8 | CLZ | src.code());
}
1641 
1642 
1643 // Saturating instructions.
1644 
1645 // Unsigned saturate.
1646 void Assembler::usat(Register dst,
1647  int satpos,
1648  const Operand& src,
1649  Condition cond) {
1650  // v6 and above.
1651  DCHECK(CpuFeatures::IsSupported(ARMv7));
1652  DCHECK(!dst.is(pc) && !src.rm_.is(pc));
1653  DCHECK((satpos >= 0) && (satpos <= 31));
1654  DCHECK((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
1655  DCHECK(src.rs_.is(no_reg));
1656 
1657  int sh = 0;
1658  if (src.shift_op_ == ASR) {
1659  sh = 1;
1660  }
1661 
1662  emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 |
1663  src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code());
1664 }
1665 
1666 
1667 // Bitfield manipulation instructions.
1668 
1669 // Unsigned bit field extract.
1670 // Extracts #width adjacent bits from position #lsb in a register, and
1671 // writes them to the low bits of a destination register.
1672 // ubfx dst, src, #lsb, #width
// Unsigned bit field extract: dst = zero-extended src[lsb .. lsb+width-1].
void Assembler::ubfx(Register dst,
                     Register src,
                     int lsb,
                     int width,
                     Condition cond) {
  // v7 and above.
  DCHECK(CpuFeatures::IsSupported(ARMv7));
  DCHECK(!dst.is(pc) && !src.is(pc));
  DCHECK((lsb >= 0) && (lsb <= 31));
  DCHECK((width >= 1) && (width <= (32 - lsb)));
  // width is encoded as width - 1 in bits 16-20.
  emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
       lsb*B7 | B6 | B4 | src.code());
}
1686 
1687 
1688 // Signed bit field extract.
1689 // Extracts #width adjacent bits from position #lsb in a register, and
1690 // writes them to the low bits of a destination register. The extracted
1691 // value is sign extended to fill the destination register.
1692 // sbfx dst, src, #lsb, #width
// Signed bit field extract: dst = sign-extended src[lsb .. lsb+width-1].
void Assembler::sbfx(Register dst,
                     Register src,
                     int lsb,
                     int width,
                     Condition cond) {
  // v7 and above.
  DCHECK(CpuFeatures::IsSupported(ARMv7));
  DCHECK(!dst.is(pc) && !src.is(pc));
  DCHECK((lsb >= 0) && (lsb <= 31));
  DCHECK((width >= 1) && (width <= (32 - lsb)));
  // width is encoded as width - 1 in bits 16-20.
  emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
       lsb*B7 | B6 | B4 | src.code());
}
1706 
1707 
1708 // Bit field clear.
1709 // Sets #width adjacent bits at position #lsb in the destination register
1710 // to zero, preserving the value of the other bits.
1711 // bfc dst, #lsb, #width
// Bit field clear: zeroes dst[lsb .. lsb+width-1], other bits unchanged.
void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
  // v7 and above.
  DCHECK(CpuFeatures::IsSupported(ARMv7));
  DCHECK(!dst.is(pc));
  DCHECK((lsb >= 0) && (lsb <= 31));
  DCHECK((width >= 1) && (width <= (32 - lsb)));
  // The encoding carries the most-significant bit position, not the width.
  int msb = lsb + width - 1;
  emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
}
1721 
1722 
1723 // Bit field insert.
1724 // Inserts #width adjacent bits from the low bits of the source register
1725 // into position #lsb of the destination register.
1726 // bfi dst, src, #lsb, #width
// Bit field insert: copies the low |width| bits of src into
// dst[lsb .. lsb+width-1], other bits of dst unchanged.
void Assembler::bfi(Register dst,
                    Register src,
                    int lsb,
                    int width,
                    Condition cond) {
  // v7 and above.
  DCHECK(CpuFeatures::IsSupported(ARMv7));
  DCHECK(!dst.is(pc) && !src.is(pc));
  DCHECK((lsb >= 0) && (lsb <= 31));
  DCHECK((width >= 1) && (width <= (32 - lsb)));
  // The encoding carries the most-significant bit position, not the width.
  int msb = lsb + width - 1;
  emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
       src.code());
}
1741 
1742 
// Pack halfword bottom-top: combines the bottom halfword of src1 with the
// (LSL-shifted) top halfword of src2 into dst.
void Assembler::pkhbt(Register dst,
                      Register src1,
                      const Operand& src2,
                      Condition cond ) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.125.
  // cond(31-28) | 01101000(27-20) | Rn(19-16) |
  // Rd(15-12) | imm5(11-7) | 0(6) | 01(5-4) | Rm(3-0)
  DCHECK(!dst.is(pc));
  DCHECK(!src1.is(pc));
  DCHECK(!src2.rm().is(pc));
  DCHECK(!src2.rm().is(no_reg));
  DCHECK(src2.rs().is(no_reg));
  DCHECK((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31));
  DCHECK(src2.shift_op() == LSL);
  emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
       src2.shift_imm_*B7 | B4 | src2.rm().code());
}
1760 
1761 
// Pack halfword top-bottom: combines the top halfword of src1 with the
// (ASR-shifted) bottom halfword of src2 into dst.
void Assembler::pkhtb(Register dst,
                      Register src1,
                      const Operand& src2,
                      Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.125.
  // cond(31-28) | 01101000(27-20) | Rn(19-16) |
  // Rd(15-12) | imm5(11-7) | 1(6) | 01(5-4) | Rm(3-0)
  DCHECK(!dst.is(pc));
  DCHECK(!src1.is(pc));
  DCHECK(!src2.rm().is(pc));
  DCHECK(!src2.rm().is(no_reg));
  DCHECK(src2.rs().is(no_reg));
  DCHECK((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32));
  DCHECK(src2.shift_op() == ASR);
  // ASR #32 is encoded as an immediate of 0.
  int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_;
  emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
       asr*B7 | B6 | B4 | src2.rm().code());
}
1780 
1781 
// Unsigned extend byte: dst = zero-extended low byte of (src rotated).
void Assembler::uxtb(Register dst,
                     const Operand& src,
                     Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.274.
  // cond(31-28) | 01101110(27-20) | 1111(19-16) |
  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
  DCHECK(!dst.is(pc));
  DCHECK(!src.rm().is(pc));
  DCHECK(!src.rm().is(no_reg));
  DCHECK(src.rs().is(no_reg));
  DCHECK((src.shift_imm_ == 0) ||
         (src.shift_imm_ == 8) ||
         (src.shift_imm_ == 16) ||
         (src.shift_imm_ == 24));
  // Operand maps ROR #0 to LSL #0.
  DCHECK((src.shift_op() == ROR) ||
         ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
  // The rotate amount (0/8/16/24) is encoded in 2 bits at positions 10-11.
  emit(cond | 0x6E*B20 | 0xF*B16 | dst.code()*B12 |
       ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
}
1802 
1803 
// Unsigned extend and add byte: dst = src1 + zero-extended low byte of
// (src2 rotated).
void Assembler::uxtab(Register dst,
                      Register src1,
                      const Operand& src2,
                      Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.271.
  // cond(31-28) | 01101110(27-20) | Rn(19-16) |
  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
  DCHECK(!dst.is(pc));
  DCHECK(!src1.is(pc));
  DCHECK(!src2.rm().is(pc));
  DCHECK(!src2.rm().is(no_reg));
  DCHECK(src2.rs().is(no_reg));
  DCHECK((src2.shift_imm_ == 0) ||
         (src2.shift_imm_ == 8) ||
         (src2.shift_imm_ == 16) ||
         (src2.shift_imm_ == 24));
  // Operand maps ROR #0 to LSL #0.
  DCHECK((src2.shift_op() == ROR) ||
         ((src2.shift_op() == LSL) && (src2.shift_imm_ == 0)));
  // The rotate amount (0/8/16/24) is encoded in 2 bits at positions 10-11.
  emit(cond | 0x6E*B20 | src1.code()*B16 | dst.code()*B12 |
       ((src2.shift_imm_ >> 1) &0xC)*B8 | 7*B4 | src2.rm().code());
}
1826 
1827 
// Unsigned extend two bytes: zero-extends bytes 0 and 2 of (src rotated)
// into the two halfwords of dst.
void Assembler::uxtb16(Register dst,
                       const Operand& src,
                       Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.275.
  // cond(31-28) | 01101100(27-20) | 1111(19-16) |
  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
  DCHECK(!dst.is(pc));
  DCHECK(!src.rm().is(pc));
  DCHECK(!src.rm().is(no_reg));
  DCHECK(src.rs().is(no_reg));
  DCHECK((src.shift_imm_ == 0) ||
         (src.shift_imm_ == 8) ||
         (src.shift_imm_ == 16) ||
         (src.shift_imm_ == 24));
  // Operand maps ROR #0 to LSL #0.
  DCHECK((src.shift_op() == ROR) ||
         ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
  // The rotate amount (0/8/16/24) is encoded in 2 bits at positions 10-11.
  emit(cond | 0x6C*B20 | 0xF*B16 | dst.code()*B12 |
       ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
}
1848 
1849 
1850 // Status register access instructions.
// Read the status register selected by |s| into dst.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
  DCHECK(!dst.is(pc));
  emit(cond | B24 | s | 15*B16 | dst.code()*B12);
}
1855 
1856 
// Write |src| into the status register fields selected by |fields|.
// Immediates that cannot be encoded as a shifter operand are loaded into ip
// first and the instruction is retried in register form.
void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
                    Condition cond) {
  DCHECK(fields >= B16 && fields < B20);  // at least one field set
  Instr instr;
  if (!src.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (src.must_output_reloc_info(this) ||
        !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
      // Immediate operand cannot be encoded, load it first to register ip.
      move_32_bit_immediate(ip, src);
      msr(fields, Operand(ip), cond);
      return;
    }
    instr = I | rotate_imm*B8 | immed_8;
  } else {
    DCHECK(!src.rs_.is_valid() && src.shift_imm_ == 0);  // only rm allowed
    instr = src.rm_.code();
  }
  emit(cond | instr | B24 | B21 | fields | 15*B12);
}
1879 
1880 
1881 // Load/Store instructions.
// Load a 32-bit word from memory into dst. A load into pc is a branch, so
// source positions are recorded for it.
void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
  if (dst.is(pc)) {
    positions_recorder()->WriteRecordedPositions();
  }
  addrmod2(cond | B26 | L, dst, src);
}
1888 
1889 
// Store a 32-bit word from src to memory.
void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
  addrmod2(cond | B26, src, dst);
}
1893 
1894 
// Load a byte from memory into dst (B bit selects byte access).
void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
  addrmod2(cond | B26 | B | L, dst, src);
}
1898 
1899 
// Store the low byte of src to memory.
void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
  addrmod2(cond | B26 | B, src, dst);
}
1903 
1904 
// Load a halfword from memory into dst (addressing mode 3).
void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | H | B4, dst, src);
}
1908 
1909 
// Store the low halfword of src to memory (addressing mode 3).
void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
  addrmod3(cond | B7 | H | B4, src, dst);
}
1913 
1914 
// Load a sign-extended byte from memory into dst (addressing mode 3).
void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | S6 | B4, dst, src);
}
1918 
1919 
// Load a sign-extended halfword from memory into dst (addressing mode 3).
void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
}
1923 
1924 
// Load a doubleword into the even/odd register pair dst1/dst2 (ARMv7).
void Assembler::ldrd(Register dst1, Register dst2,
                     const MemOperand& src, Condition cond) {
  DCHECK(IsEnabled(ARMv7));
  DCHECK(src.rm().is(no_reg));  // register offsets not supported here
  DCHECK(!dst1.is(lr));  // r14.
  DCHECK_EQ(0, dst1.code() % 2);           // first register must be even
  DCHECK_EQ(dst1.code() + 1, dst2.code());  // registers must be consecutive
  addrmod3(cond | B7 | B6 | B4, dst1, src);
}
1934 
1935 
// Store a doubleword from the even/odd register pair src1/src2 (ARMv7).
void Assembler::strd(Register src1, Register src2,
                     const MemOperand& dst, Condition cond) {
  DCHECK(dst.rm().is(no_reg));  // register offsets not supported here
  DCHECK(!src1.is(lr));  // r14.
  DCHECK_EQ(0, src1.code() % 2);           // first register must be even
  DCHECK_EQ(src1.code() + 1, src2.code());  // registers must be consecutive
  DCHECK(IsEnabled(ARMv7));
  addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
}
1945 
1946 
1947 // Preload instructions.
1948 void Assembler::pld(const MemOperand& address) {
1949  // Instruction details available in ARM DDI 0406C.b, A8.8.128.
1950  // 1111(31-28) | 0111(27-24) | U(23) | R(22) | 01(21-20) | Rn(19-16) |
1951  // 1111(15-12) | imm5(11-07) | type(6-5) | 0(4)| Rm(3-0) |
1952  DCHECK(address.rm().is(no_reg));
1953  DCHECK(address.am() == Offset);
1954  int U = B23;
1955  int offset = address.offset();
1956  if (offset < 0) {
1957  offset = -offset;
1958  U = 0;
1959  }
1960  DCHECK(offset < 4096);
1961  emit(kSpecialCondition | B26 | B24 | U | B22 | B20 | address.rn().code()*B16 |
1962  0xf*B12 | offset);
1963 }
1964 
1965 
1966 // Load/Store multiple instructions.
1967 void Assembler::ldm(BlockAddrMode am,
1968  Register base,
1969  RegList dst,
1970  Condition cond) {
1971  // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable.
1972  DCHECK(base.is(sp) || (dst & sp.bit()) == 0);
1973 
1974  addrmod4(cond | B27 | am | L, base, dst);
1975 
1976  // Emit the constant pool after a function return implemented by ldm ..{..pc}.
1977  if (cond == al && (dst & pc.bit()) != 0) {
1978  // There is a slight chance that the ldm instruction was actually a call,
1979  // in which case it would be wrong to return into the constant pool; we
1980  // recognize this case by checking if the emission of the pool was blocked
1981  // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
1982  // the case, we emit a jump over the pool.
1983  CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
1984  }
1985 }
1986 
1987 
1988 void Assembler::stm(BlockAddrMode am,
1989  Register base,
1990  RegList src,
1991  Condition cond) {
1992  addrmod4(cond | B27 | am, base, src);
1993 }
1994 
1995 
1996 // Exception-generating instructions and debugging support.
1997 // Stops with a non-negative code less than kNumOfWatchedStops support
1998 // enabling/disabling and a counter feature. See simulator-arm.h .
1999 void Assembler::stop(const char* msg, Condition cond, int32_t code) {
2000 #ifndef __arm__
2001  DCHECK(code >= kDefaultStopCode);
2002  {
2003  // The Simulator will handle the stop instruction and get the message
2004  // address. It expects to find the address just after the svc instruction.
2005  BlockConstPoolScope block_const_pool(this);
2006  if (code >= 0) {
2007  svc(kStopCode + code, cond);
2008  } else {
2009  svc(kStopCode + kMaxStopCode, cond);
2010  }
2011  emit(reinterpret_cast<Instr>(msg));
2012  }
2013 #else // def __arm__
2014  if (cond != al) {
2015  Label skip;
2016  b(&skip, NegateCondition(cond));
2017  bkpt(0);
2018  bind(&skip);
2019  } else {
2020  bkpt(0);
2021  }
2022 #endif // def __arm__
2023 }
2024 
2025 
2026 void Assembler::bkpt(uint32_t imm16) { // v5 and above
2027  DCHECK(is_uint16(imm16));
2028  emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
2029 }
2030 
2031 
2032 void Assembler::svc(uint32_t imm24, Condition cond) {
2033  DCHECK(is_uint24(imm24));
2034  emit(cond | 15*B24 | imm24);
2035 }
2036 
2037 
2038 // Coprocessor instructions.
2039 void Assembler::cdp(Coprocessor coproc,
2040  int opcode_1,
2041  CRegister crd,
2042  CRegister crn,
2043  CRegister crm,
2044  int opcode_2,
2045  Condition cond) {
2046  DCHECK(is_uint4(opcode_1) && is_uint3(opcode_2));
2047  emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
2048  crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
2049 }
2050 
2051 
2052 void Assembler::cdp2(Coprocessor coproc,
2053  int opcode_1,
2054  CRegister crd,
2055  CRegister crn,
2056  CRegister crm,
2057  int opcode_2) { // v5 and above
2058  cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
2059 }
2060 
2061 
2062 void Assembler::mcr(Coprocessor coproc,
2063  int opcode_1,
2064  Register rd,
2065  CRegister crn,
2066  CRegister crm,
2067  int opcode_2,
2068  Condition cond) {
2069  DCHECK(is_uint3(opcode_1) && is_uint3(opcode_2));
2070  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
2071  rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
2072 }
2073 
2074 
2075 void Assembler::mcr2(Coprocessor coproc,
2076  int opcode_1,
2077  Register rd,
2078  CRegister crn,
2079  CRegister crm,
2080  int opcode_2) { // v5 and above
2081  mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
2082 }
2083 
2084 
2085 void Assembler::mrc(Coprocessor coproc,
2086  int opcode_1,
2087  Register rd,
2088  CRegister crn,
2089  CRegister crm,
2090  int opcode_2,
2091  Condition cond) {
2092  DCHECK(is_uint3(opcode_1) && is_uint3(opcode_2));
2093  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
2094  rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
2095 }
2096 
2097 
2098 void Assembler::mrc2(Coprocessor coproc,
2099  int opcode_1,
2100  Register rd,
2101  CRegister crn,
2102  CRegister crm,
2103  int opcode_2) { // v5 and above
2104  mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
2105 }
2106 
2107 
2108 void Assembler::ldc(Coprocessor coproc,
2109  CRegister crd,
2110  const MemOperand& src,
2111  LFlag l,
2112  Condition cond) {
2113  addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
2114 }
2115 
2116 
2117 void Assembler::ldc(Coprocessor coproc,
2118  CRegister crd,
2119  Register rn,
2120  int option,
2121  LFlag l,
2122  Condition cond) {
2123  // Unindexed addressing.
2124  DCHECK(is_uint8(option));
2125  emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
2126  coproc*B8 | (option & 255));
2127 }
2128 
2129 
2130 void Assembler::ldc2(Coprocessor coproc,
2131  CRegister crd,
2132  const MemOperand& src,
2133  LFlag l) { // v5 and above
2134  ldc(coproc, crd, src, l, kSpecialCondition);
2135 }
2136 
2137 
2138 void Assembler::ldc2(Coprocessor coproc,
2139  CRegister crd,
2140  Register rn,
2141  int option,
2142  LFlag l) { // v5 and above
2143  ldc(coproc, crd, rn, option, l, kSpecialCondition);
2144 }
2145 
2146 
2147 // Support for VFP.
2148 
2149 void Assembler::vldr(const DwVfpRegister dst,
2150  const Register base,
2151  int offset,
2152  const Condition cond) {
2153  // Ddst = MEM(Rbase + offset).
2154  // Instruction details available in ARM DDI 0406C.b, A8-924.
2155  // cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) |
2156  // Vd(15-12) | 1011(11-8) | offset
2157  int u = 1;
2158  if (offset < 0) {
2159  offset = -offset;
2160  u = 0;
2161  }
2162  int vd, d;
2163  dst.split_code(&vd, &d);
2164 
2165  DCHECK(offset >= 0);
2166  if ((offset % 4) == 0 && (offset / 4) < 256) {
2167  emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 |
2168  0xB*B8 | ((offset / 4) & 255));
2169  } else {
2170  // Larger offsets must be handled by computing the correct address
2171  // in the ip register.
2172  DCHECK(!base.is(ip));
2173  if (u == 1) {
2174  add(ip, base, Operand(offset));
2175  } else {
2176  sub(ip, base, Operand(offset));
2177  }
2178  emit(cond | 0xD*B24 | d*B22 | B20 | ip.code()*B16 | vd*B12 | 0xB*B8);
2179  }
2180 }
2181 
2182 
2183 void Assembler::vldr(const DwVfpRegister dst,
2184  const MemOperand& operand,
2185  const Condition cond) {
2186  DCHECK(operand.am_ == Offset);
2187  if (operand.rm().is_valid()) {
2188  add(ip, operand.rn(),
2189  Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
2190  vldr(dst, ip, 0, cond);
2191  } else {
2192  vldr(dst, operand.rn(), operand.offset(), cond);
2193  }
2194 }
2195 
2196 
2197 void Assembler::vldr(const SwVfpRegister dst,
2198  const Register base,
2199  int offset,
2200  const Condition cond) {
2201  // Sdst = MEM(Rbase + offset).
2202  // Instruction details available in ARM DDI 0406A, A8-628.
2203  // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
2204  // Vdst(15-12) | 1010(11-8) | offset
2205  int u = 1;
2206  if (offset < 0) {
2207  offset = -offset;
2208  u = 0;
2209  }
2210  int sd, d;
2211  dst.split_code(&sd, &d);
2212  DCHECK(offset >= 0);
2213 
2214  if ((offset % 4) == 0 && (offset / 4) < 256) {
2215  emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
2216  0xA*B8 | ((offset / 4) & 255));
2217  } else {
2218  // Larger offsets must be handled by computing the correct address
2219  // in the ip register.
2220  DCHECK(!base.is(ip));
2221  if (u == 1) {
2222  add(ip, base, Operand(offset));
2223  } else {
2224  sub(ip, base, Operand(offset));
2225  }
2226  emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
2227  }
2228 }
2229 
2230 
2231 void Assembler::vldr(const SwVfpRegister dst,
2232  const MemOperand& operand,
2233  const Condition cond) {
2234  DCHECK(operand.am_ == Offset);
2235  if (operand.rm().is_valid()) {
2236  add(ip, operand.rn(),
2237  Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
2238  vldr(dst, ip, 0, cond);
2239  } else {
2240  vldr(dst, operand.rn(), operand.offset(), cond);
2241  }
2242 }
2243 
2244 
2245 void Assembler::vstr(const DwVfpRegister src,
2246  const Register base,
2247  int offset,
2248  const Condition cond) {
2249  // MEM(Rbase + offset) = Dsrc.
2250  // Instruction details available in ARM DDI 0406C.b, A8-1082.
2251  // cond(31-28) | 1101(27-24)| U(23) | D(22) | 00(21-20) | Rbase(19-16) |
2252  // Vd(15-12) | 1011(11-8) | (offset/4)
2253  int u = 1;
2254  if (offset < 0) {
2255  offset = -offset;
2256  u = 0;
2257  }
2258  DCHECK(offset >= 0);
2259  int vd, d;
2260  src.split_code(&vd, &d);
2261 
2262  if ((offset % 4) == 0 && (offset / 4) < 256) {
2263  emit(cond | 0xD*B24 | u*B23 | d*B22 | base.code()*B16 | vd*B12 | 0xB*B8 |
2264  ((offset / 4) & 255));
2265  } else {
2266  // Larger offsets must be handled by computing the correct address
2267  // in the ip register.
2268  DCHECK(!base.is(ip));
2269  if (u == 1) {
2270  add(ip, base, Operand(offset));
2271  } else {
2272  sub(ip, base, Operand(offset));
2273  }
2274  emit(cond | 0xD*B24 | d*B22 | ip.code()*B16 | vd*B12 | 0xB*B8);
2275  }
2276 }
2277 
2278 
2279 void Assembler::vstr(const DwVfpRegister src,
2280  const MemOperand& operand,
2281  const Condition cond) {
2282  DCHECK(operand.am_ == Offset);
2283  if (operand.rm().is_valid()) {
2284  add(ip, operand.rn(),
2285  Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
2286  vstr(src, ip, 0, cond);
2287  } else {
2288  vstr(src, operand.rn(), operand.offset(), cond);
2289  }
2290 }
2291 
2292 
2293 void Assembler::vstr(const SwVfpRegister src,
2294  const Register base,
2295  int offset,
2296  const Condition cond) {
2297  // MEM(Rbase + offset) = SSrc.
2298  // Instruction details available in ARM DDI 0406A, A8-786.
2299  // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
2300  // Vdst(15-12) | 1010(11-8) | (offset/4)
2301  int u = 1;
2302  if (offset < 0) {
2303  offset = -offset;
2304  u = 0;
2305  }
2306  int sd, d;
2307  src.split_code(&sd, &d);
2308  DCHECK(offset >= 0);
2309  if ((offset % 4) == 0 && (offset / 4) < 256) {
2310  emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
2311  0xA*B8 | ((offset / 4) & 255));
2312  } else {
2313  // Larger offsets must be handled by computing the correct address
2314  // in the ip register.
2315  DCHECK(!base.is(ip));
2316  if (u == 1) {
2317  add(ip, base, Operand(offset));
2318  } else {
2319  sub(ip, base, Operand(offset));
2320  }
2321  emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
2322  }
2323 }
2324 
2325 
2326 void Assembler::vstr(const SwVfpRegister src,
2327  const MemOperand& operand,
2328  const Condition cond) {
2329  DCHECK(operand.am_ == Offset);
2330  if (operand.rm().is_valid()) {
2331  add(ip, operand.rn(),
2332  Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
2333  vstr(src, ip, 0, cond);
2334  } else {
2335  vstr(src, operand.rn(), operand.offset(), cond);
2336  }
2337 }
2338 
2339 
2340 void Assembler::vldm(BlockAddrMode am,
2341  Register base,
2342  DwVfpRegister first,
2343  DwVfpRegister last,
2344  Condition cond) {
2345  // Instruction details available in ARM DDI 0406C.b, A8-922.
2346  // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
2347  // first(15-12) | 1011(11-8) | (count * 2)
2348  DCHECK_LE(first.code(), last.code());
2349  DCHECK(am == ia || am == ia_w || am == db_w);
2350  DCHECK(!base.is(pc));
2351 
2352  int sd, d;
2353  first.split_code(&sd, &d);
2354  int count = last.code() - first.code() + 1;
2355  DCHECK(count <= 16);
2356  emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
2357  0xB*B8 | count*2);
2358 }
2359 
2360 
2361 void Assembler::vstm(BlockAddrMode am,
2362  Register base,
2363  DwVfpRegister first,
2364  DwVfpRegister last,
2365  Condition cond) {
2366  // Instruction details available in ARM DDI 0406C.b, A8-1080.
2367  // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
2368  // first(15-12) | 1011(11-8) | (count * 2)
2369  DCHECK_LE(first.code(), last.code());
2370  DCHECK(am == ia || am == ia_w || am == db_w);
2371  DCHECK(!base.is(pc));
2372 
2373  int sd, d;
2374  first.split_code(&sd, &d);
2375  int count = last.code() - first.code() + 1;
2376  DCHECK(count <= 16);
2377  emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
2378  0xB*B8 | count*2);
2379 }
2380 
2381 void Assembler::vldm(BlockAddrMode am,
2382  Register base,
2383  SwVfpRegister first,
2384  SwVfpRegister last,
2385  Condition cond) {
2386  // Instruction details available in ARM DDI 0406A, A8-626.
2387  // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
2388  // first(15-12) | 1010(11-8) | (count/2)
2389  DCHECK_LE(first.code(), last.code());
2390  DCHECK(am == ia || am == ia_w || am == db_w);
2391  DCHECK(!base.is(pc));
2392 
2393  int sd, d;
2394  first.split_code(&sd, &d);
2395  int count = last.code() - first.code() + 1;
2396  emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
2397  0xA*B8 | count);
2398 }
2399 
2400 
2401 void Assembler::vstm(BlockAddrMode am,
2402  Register base,
2403  SwVfpRegister first,
2404  SwVfpRegister last,
2405  Condition cond) {
2406  // Instruction details available in ARM DDI 0406A, A8-784.
2407  // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
2408  // first(15-12) | 1011(11-8) | (count/2)
2409  DCHECK_LE(first.code(), last.code());
2410  DCHECK(am == ia || am == ia_w || am == db_w);
2411  DCHECK(!base.is(pc));
2412 
2413  int sd, d;
2414  first.split_code(&sd, &d);
2415  int count = last.code() - first.code() + 1;
2416  emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
2417  0xA*B8 | count);
2418 }
2419 
2420 
// Reinterpret the bit pattern of a double as two 32-bit halves
// (*lo = low 32 bits, *hi = high 32 bits).
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof(bits));
  *lo = static_cast<uint32_t>(bits & 0xffffffff);
  *hi = static_cast<uint32_t>(bits >> 32);
}
2428 
2429 
2430 // Only works for little endian floating point formats.
2431 // We don't support VFP on the mixed endian floating point platform.
2432 static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
2433  DCHECK(CpuFeatures::IsSupported(VFP3));
2434 
2435  // VMOV can accept an immediate of the form:
2436  //
2437  // +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
2438  //
2439  // The immediate is encoded using an 8-bit quantity, comprised of two
2440  // 4-bit fields. For an 8-bit immediate of the form:
2441  //
2442  // [abcdefgh]
2443  //
2444  // where a is the MSB and h is the LSB, an immediate 64-bit double can be
2445  // created of the form:
2446  //
2447  // [aBbbbbbb,bbcdefgh,00000000,00000000,
2448  // 00000000,00000000,00000000,00000000]
2449  //
2450  // where B = ~b.
2451  //
2452 
2453  uint32_t lo, hi;
2454  DoubleAsTwoUInt32(d, &lo, &hi);
2455 
2456  // The most obvious constraint is the long block of zeroes.
2457  if ((lo != 0) || ((hi & 0xffff) != 0)) {
2458  return false;
2459  }
2460 
2461  // Bits 62:55 must be all clear or all set.
2462  if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
2463  return false;
2464  }
2465 
2466  // Bit 63 must be NOT bit 62.
2467  if (((hi ^ (hi << 1)) & (0x40000000)) == 0) {
2468  return false;
2469  }
2470 
2471  // Create the encoded immediate in the form:
2472  // [00000000,0000abcd,00000000,0000efgh]
2473  *encoding = (hi >> 16) & 0xf; // Low nybble.
2474  *encoding |= (hi >> 4) & 0x70000; // Low three bits of the high nybble.
2475  *encoding |= (hi >> 12) & 0x80000; // Top bit of the high nybble.
2476 
2477  return true;
2478 }
2479 
2480 
2481 void Assembler::vmov(const DwVfpRegister dst,
2482  double imm,
2483  const Register scratch) {
2484  uint32_t enc;
2485  if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
2486  // The double can be encoded in the instruction.
2487  //
2488  // Dd = immediate
2489  // Instruction details available in ARM DDI 0406C.b, A8-936.
2490  // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | imm4H(19-16) |
2491  // Vd(15-12) | 101(11-9) | sz=1(8) | imm4L(3-0)
2492  int vd, d;
2493  dst.split_code(&vd, &d);
2494  emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
2495  } else if (FLAG_enable_vldr_imm && is_constant_pool_available()) {
2496  // TODO(jfb) Temporarily turned off until we have constant blinding or
2497  // some equivalent mitigation: an attacker can otherwise control
2498  // generated data which also happens to be executable, a Very Bad
2499  // Thing indeed.
2500  // Blinding gets tricky because we don't have xor, we probably
2501  // need to add/subtract without losing precision, which requires a
2502  // cookie value that Lithium is probably better positioned to
2503  // choose.
2504  // We could also add a few peepholes here like detecting 0.0 and
2505  // -0.0 and doing a vmov from the sequestered d14, forcing denorms
2506  // to zero (we set flush-to-zero), and normalizing NaN values.
2507  // We could also detect redundant values.
2508  // The code could also randomize the order of values, though
2509  // that's tricky because vldr has a limited reach. Furthermore
2510  // it breaks load locality.
2511  RelocInfo rinfo(pc_, imm);
2512  ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo);
2513  if (section == ConstantPoolArray::EXTENDED_SECTION) {
2514  DCHECK(FLAG_enable_ool_constant_pool);
2515  // Emit instructions to load constant pool offset.
2516  movw(ip, 0);
2517  movt(ip, 0);
2518  // Load from constant pool at offset.
2519  vldr(dst, MemOperand(pp, ip));
2520  } else {
2521  DCHECK(section == ConstantPoolArray::SMALL_SECTION);
2522  vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0));
2523  }
2524  } else {
2525  // Synthesise the double from ARM immediates.
2526  uint32_t lo, hi;
2527  DoubleAsTwoUInt32(imm, &lo, &hi);
2528 
2529  if (scratch.is(no_reg)) {
2530  if (dst.code() < 16) {
2531  const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
2532  // Move the low part of the double into the lower of the corresponsing S
2533  // registers of D register dst.
2534  mov(ip, Operand(lo));
2535  vmov(loc.low(), ip);
2536 
2537  // Move the high part of the double into the higher of the
2538  // corresponsing S registers of D register dst.
2539  mov(ip, Operand(hi));
2540  vmov(loc.high(), ip);
2541  } else {
2542  // D16-D31 does not have S registers, so move the low and high parts
2543  // directly to the D register using vmov.32.
2544  // Note: This may be slower, so we only do this when we have to.
2545  mov(ip, Operand(lo));
2546  vmov(dst, VmovIndexLo, ip);
2547  mov(ip, Operand(hi));
2548  vmov(dst, VmovIndexHi, ip);
2549  }
2550  } else {
2551  // Move the low and high parts of the double to a D register in one
2552  // instruction.
2553  mov(ip, Operand(lo));
2554  mov(scratch, Operand(hi));
2555  vmov(dst, ip, scratch);
2556  }
2557  }
2558 }
2559 
2560 
2561 void Assembler::vmov(const SwVfpRegister dst,
2562  const SwVfpRegister src,
2563  const Condition cond) {
2564  // Sd = Sm
2565  // Instruction details available in ARM DDI 0406B, A8-642.
2566  int sd, d, sm, m;
2567  dst.split_code(&sd, &d);
2568  src.split_code(&sm, &m);
2569  emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | sd*B12 | 0xA*B8 | B6 | m*B5 | sm);
2570 }
2571 
2572 
2573 void Assembler::vmov(const DwVfpRegister dst,
2574  const DwVfpRegister src,
2575  const Condition cond) {
2576  // Dd = Dm
2577  // Instruction details available in ARM DDI 0406C.b, A8-938.
2578  // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
2579  // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2580  int vd, d;
2581  dst.split_code(&vd, &d);
2582  int vm, m;
2583  src.split_code(&vm, &m);
2584  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B6 | m*B5 |
2585  vm);
2586 }
2587 
2588 
2589 void Assembler::vmov(const DwVfpRegister dst,
2590  const VmovIndex index,
2591  const Register src,
2592  const Condition cond) {
2593  // Dd[index] = Rt
2594  // Instruction details available in ARM DDI 0406C.b, A8-940.
2595  // cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) |
2596  // Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
2597  DCHECK(index.index == 0 || index.index == 1);
2598  int vd, d;
2599  dst.split_code(&vd, &d);
2600  emit(cond | 0xE*B24 | index.index*B21 | vd*B16 | src.code()*B12 | 0xB*B8 |
2601  d*B7 | B4);
2602 }
2603 
2604 
2605 void Assembler::vmov(const Register dst,
2606  const VmovIndex index,
2607  const DwVfpRegister src,
2608  const Condition cond) {
2609  // Dd[index] = Rt
2610  // Instruction details available in ARM DDI 0406C.b, A8.8.342.
2611  // cond(31-28) | 1110(27-24) | U=0(23) | opc1=0index(22-21) | 1(20) |
2612  // Vn(19-16) | Rt(15-12) | 1011(11-8) | N(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
2613  DCHECK(index.index == 0 || index.index == 1);
2614  int vn, n;
2615  src.split_code(&vn, &n);
2616  emit(cond | 0xE*B24 | index.index*B21 | B20 | vn*B16 | dst.code()*B12 |
2617  0xB*B8 | n*B7 | B4);
2618 }
2619 
2620 
2621 void Assembler::vmov(const DwVfpRegister dst,
2622  const Register src1,
2623  const Register src2,
2624  const Condition cond) {
2625  // Dm = <Rt,Rt2>.
2626  // Instruction details available in ARM DDI 0406C.b, A8-948.
2627  // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
2628  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
2629  DCHECK(!src1.is(pc) && !src2.is(pc));
2630  int vm, m;
2631  dst.split_code(&vm, &m);
2632  emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
2633  src1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
2634 }
2635 
2636 
2637 void Assembler::vmov(const Register dst1,
2638  const Register dst2,
2639  const DwVfpRegister src,
2640  const Condition cond) {
2641  // <Rt,Rt2> = Dm.
2642  // Instruction details available in ARM DDI 0406C.b, A8-948.
2643  // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
2644  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
2645  DCHECK(!dst1.is(pc) && !dst2.is(pc));
2646  int vm, m;
2647  src.split_code(&vm, &m);
2648  emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
2649  dst1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
2650 }
2651 
2652 
2653 void Assembler::vmov(const SwVfpRegister dst,
2654  const Register src,
2655  const Condition cond) {
2656  // Sn = Rt.
2657  // Instruction details available in ARM DDI 0406A, A8-642.
2658  // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
2659  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
2660  DCHECK(!src.is(pc));
2661  int sn, n;
2662  dst.split_code(&sn, &n);
2663  emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
2664 }
2665 
2666 
2667 void Assembler::vmov(const Register dst,
2668  const SwVfpRegister src,
2669  const Condition cond) {
2670  // Rt = Sn.
2671  // Instruction details available in ARM DDI 0406A, A8-642.
2672  // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
2673  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
2674  DCHECK(!dst.is(pc));
2675  int sn, n;
2676  src.split_code(&sn, &n);
2677  emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
2678 }
2679 
2680 
// Type of data to read from or write to a VFP register, used as the
// specifier in the generic vcvt encoding below.
enum VFPType { S32, U32, F32, F64 };
2684 
2685 
2686 static bool IsSignedVFPType(VFPType type) {
2687  switch (type) {
2688  case S32:
2689  return true;
2690  case U32:
2691  return false;
2692  default:
2693  UNREACHABLE();
2694  return false;
2695  }
2696 }
2697 
2698 
2699 static bool IsIntegerVFPType(VFPType type) {
2700  switch (type) {
2701  case S32:
2702  case U32:
2703  return true;
2704  case F32:
2705  case F64:
2706  return false;
2707  default:
2708  UNREACHABLE();
2709  return false;
2710  }
2711 }
2712 
2713 
2714 static bool IsDoubleVFPType(VFPType type) {
2715  switch (type) {
2716  case F32:
2717  return false;
2718  case F64:
2719  return true;
2720  default:
2721  UNREACHABLE();
2722  return false;
2723  }
2724 }
2725 
2726 
2727 // Split five bit reg_code based on size of reg_type.
2728 // 32-bit register codes are Vm:M
2729 // 64-bit register codes are M:Vm
2730 // where Vm is four bits, and M is a single bit.
2731 static void SplitRegCode(VFPType reg_type,
2732  int reg_code,
2733  int* vm,
2734  int* m) {
2735  DCHECK((reg_code >= 0) && (reg_code <= 31));
2736  if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
2737  // 32 bit type.
2738  *m = reg_code & 0x1;
2739  *vm = reg_code >> 1;
2740  } else {
2741  // 64 bit type.
2742  *m = (reg_code & 0x10) >> 4;
2743  *vm = reg_code & 0x0F;
2744  }
2745 }
2746 
2747 
2748 // Encode vcvt.src_type.dst_type instruction.
2749 static Instr EncodeVCVT(const VFPType dst_type,
2750  const int dst_code,
2751  const VFPType src_type,
2752  const int src_code,
2754  const Condition cond) {
2755  DCHECK(src_type != dst_type);
2756  int D, Vd, M, Vm;
2757  SplitRegCode(src_type, src_code, &Vm, &M);
2758  SplitRegCode(dst_type, dst_code, &Vd, &D);
2759 
2760  if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
2761  // Conversion between IEEE floating point and 32-bit integer.
2762  // Instruction details available in ARM DDI 0406B, A8.6.295.
2763  // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) |
2764  // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2765  DCHECK(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));
2766 
2767  int sz, opc2, op;
2768 
2769  if (IsIntegerVFPType(dst_type)) {
2770  opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
2771  sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
2772  op = mode;
2773  } else {
2774  DCHECK(IsIntegerVFPType(src_type));
2775  opc2 = 0x0;
2776  sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
2777  op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
2778  }
2779 
2780  return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
2781  Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm);
2782  } else {
2783  // Conversion between IEEE double and single precision.
2784  // Instruction details available in ARM DDI 0406B, A8.6.298.
2785  // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
2786  // Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2787  int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
2788  return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
2789  Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
2790  }
2791 }
2792 
2793 
2794 void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
2795  const SwVfpRegister src,
2797  const Condition cond) {
2798  emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
2799 }
2800 
2801 
2802 void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
2803  const SwVfpRegister src,
2805  const Condition cond) {
2806  emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
2807 }
2808 
2809 
2810 void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
2811  const SwVfpRegister src,
2813  const Condition cond) {
2814  emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
2815 }
2816 
2817 
2818 void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
2819  const DwVfpRegister src,
2821  const Condition cond) {
2822  emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
2823 }
2824 
2825 
2826 void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
2827  const DwVfpRegister src,
2829  const Condition cond) {
2830  emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
2831 }
2832 
2833 
2834 void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
2835  const SwVfpRegister src,
2837  const Condition cond) {
2838  emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
2839 }
2840 
2841 
2842 void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
2843  const DwVfpRegister src,
2845  const Condition cond) {
2846  emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
2847 }
2848 
2849 
2850 void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
2851  int fraction_bits,
2852  const Condition cond) {
2853  // Instruction details available in ARM DDI 0406C.b, A8-874.
2854  // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 1010(19-16) | Vd(15-12) |
2855  // 101(11-9) | sf=1(8) | sx=1(7) | 1(6) | i(5) | 0(4) | imm4(3-0)
2856  DCHECK(fraction_bits > 0 && fraction_bits <= 32);
2857  DCHECK(CpuFeatures::IsSupported(VFP3));
2858  int vd, d;
2859  dst.split_code(&vd, &d);
2860  int imm5 = 32 - fraction_bits;
2861  int i = imm5 & 1;
2862  int imm4 = (imm5 >> 1) & 0xf;
2863  emit(cond | 0xE*B24 | B23 | d*B22 | 0x3*B20 | B19 | 0x2*B16 |
2864  vd*B12 | 0x5*B9 | B8 | B7 | B6 | i*B5 | imm4);
2865 }
2866 
2867 
2868 void Assembler::vneg(const DwVfpRegister dst,
2869  const DwVfpRegister src,
2870  const Condition cond) {
2871  // Instruction details available in ARM DDI 0406C.b, A8-968.
2872  // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) |
2873  // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2874  int vd, d;
2875  dst.split_code(&vd, &d);
2876  int vm, m;
2877  src.split_code(&vm, &m);
2878 
2879  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | B6 |
2880  m*B5 | vm);
2881 }
2882 
2883 
2884 void Assembler::vabs(const DwVfpRegister dst,
2885  const DwVfpRegister src,
2886  const Condition cond) {
2887  // Instruction details available in ARM DDI 0406C.b, A8-524.
2888  // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
2889  // 101(11-9) | sz=1(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2890  int vd, d;
2891  dst.split_code(&vd, &d);
2892  int vm, m;
2893  src.split_code(&vm, &m);
2894  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B7 | B6 |
2895  m*B5 | vm);
2896 }
2897 
2898 
2899 void Assembler::vadd(const DwVfpRegister dst,
2900  const DwVfpRegister src1,
2901  const DwVfpRegister src2,
2902  const Condition cond) {
2903  // Dd = vadd(Dn, Dm) double precision floating point addition.
2904  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
2905  // Instruction details available in ARM DDI 0406C.b, A8-830.
2906  // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
2907  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
2908  int vd, d;
2909  dst.split_code(&vd, &d);
2910  int vn, n;
2911  src1.split_code(&vn, &n);
2912  int vm, m;
2913  src2.split_code(&vm, &m);
2914  emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
2915  n*B7 | m*B5 | vm);
2916 }
2917 
2918 
void Assembler::vsub(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vsub(Dn, Dm) double precision floating point subtraction.
  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
  // Instruction details available in ARM DDI 0406C.b, A8-1086.
  // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
  // Same encoding as vadd except bit 6 is set (op = subtract).
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
       n*B7 | B6 | m*B5 | vm);
}
2937 
2938 
void Assembler::vmul(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vmul(Dn, Dm) double precision floating point multiplication.
  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
  // Instruction details available in ARM DDI 0406C.b, A8-960.
  // cond(31-28) | 11100(27-23)| D(22) | 10(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
  // Split each register code into its 4-bit field and top bit for encoding.
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C*B23 | d*B22 | 0x2*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
       n*B7 | m*B5 | vm);
}
2957 
2958 
void Assembler::vmla(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Double precision floating point multiply accumulate (VMLA, op=0).
  // Instruction details available in ARM DDI 0406C.b, A8-932.
  // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=0(6) | M(5) | 0(4) | Vm(3-0)
  // Split each register code into its 4-bit field and top bit for encoding.
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
       vm);
}
2975 
2976 
void Assembler::vmls(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Double precision floating point multiply subtract (VMLS, op=1).
  // Instruction details available in ARM DDI 0406C.b, A8-932.
  // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=1(6) | M(5) | 0(4) | Vm(3-0)
  // Same encoding as vmla except bit 6 (op) is set.
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | B6 |
       m*B5 | vm);
}
2993 
2994 
void Assembler::vdiv(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vdiv(Dn, Dm) double precision floating point division.
  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
  // Instruction details available in ARM DDI 0406C.b, A8-882.
  // cond(31-28) | 11101(27-23)| D(22) | 00(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
  // Split each register code into its 4-bit field and top bit for encoding.
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1D*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
       vm);
}
3013 
3014 
void Assembler::vcmp(const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // vcmp(Dd, Dm) double precision floating point comparison.
  // Sets the FPSCR flags; read them back into APSR with vmrs.
  // Instruction details available in ARM DDI 0406C.b, A8-864.
  // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0100(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  src1.split_code(&vd, &d);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x4*B16 | vd*B12 | 0x5*B9 | B8 | B6 |
       m*B5 | vm);
}
3029 
3030 
void Assembler::vcmp(const DwVfpRegister src1,
                     const double src2,
                     const Condition cond) {
  // vcmp(Dd, #0.0) double precision floating point comparison.
  // Instruction details available in ARM DDI 0406C.b, A8-864.
  // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
  // Only comparison against immediate 0.0 is encodable; any other value is a
  // programming error. (Note -0.0 also satisfies this check, since
  // -0.0 == 0.0 in IEEE comparison.)
  DCHECK(src2 == 0.0);
  int vd, d;
  src1.split_code(&vd, &d);
  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x5*B16 | vd*B12 | 0x5*B9 | B8 | B6);
}
3043 
3044 
void Assembler::vmsr(Register dst, Condition cond) {
  // VMSR: move the given ARM core register into the floating-point status
  // register (the register is encoded in the Rt field).
  // Instruction details available in ARM DDI 0406A, A8-652.
  // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
  // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
  emit(cond | 0xE*B24 | 0xE*B20 | B16 |
       dst.code()*B12 | 0xA*B8 | B4);
}
3052 
3053 
void Assembler::vmrs(Register dst, Condition cond) {
  // VMRS: move the floating-point status register into the ARM core
  // register dst.
  // Instruction details available in ARM DDI 0406A, A8-652.
  // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
  // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
  emit(cond | 0xE*B24 | 0xF*B20 | B16 |
       dst.code()*B12 | 0xA*B8 | B4);
}
3061 
3062 
void Assembler::vsqrt(const DwVfpRegister dst,
                      const DwVfpRegister src,
                      const Condition cond) {
  // Dd = vsqrt(Dm) double precision floating point square root.
  // Instruction details available in ARM DDI 0406C.b, A8-1058.
  // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | 0x3*B6 |
       m*B5 | vm);
}
3076 
3077 
3078 // Support for NEON.
3079 
void Assembler::vld1(NeonSize size,
                     const NeonListOperand& dst,
                     const NeonMemOperand& src) {
  // NEON load of multiple single elements from memory into the register
  // list dst (VLD1). Unconditional encoding (first nibble 0xF).
  // Instruction details available in ARM DDI 0406C.b, A8.8.320.
  // 1111(31-28) | 01000(27-23) | D(22) | 10(21-20) | Rn(19-16) |
  // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
  DCHECK(CpuFeatures::IsSupported(NEON));
  int vd, d;
  dst.base().split_code(&vd, &d);
  emit(0xFU*B28 | 4*B24 | d*B22 | 2*B20 | src.rn().code()*B16 | vd*B12 |
       dst.type()*B8 | size*B6 | src.align()*B4 | src.rm().code());
}
3092 
3093 
void Assembler::vst1(NeonSize size,
                     const NeonListOperand& src,
                     const NeonMemOperand& dst) {
  // NEON store of multiple single elements from the register list src to
  // memory (VST1). Unconditional encoding (first nibble 0xF).
  // Instruction details available in ARM DDI 0406C.b, A8.8.404.
  // 1111(31-28) | 01000(27-23) | D(22) | 00(21-20) | Rn(19-16) |
  // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
  DCHECK(CpuFeatures::IsSupported(NEON));
  int vd, d;
  src.base().split_code(&vd, &d);
  emit(0xFU*B28 | 4*B24 | d*B22 | dst.rn().code()*B16 | vd*B12 | src.type()*B8 |
       size*B6 | dst.align()*B4 | dst.rm().code());
}
3106 
3107 
void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
  // NEON vector move long (VMOVL): widens the elements of Dm into Qd; the
  // U bit and size come from the NeonDataType dt.
  // Instruction details available in ARM DDI 0406C.b, A8.8.346.
  // 1111(31-28) | 001(27-25) | U(24) | 1(23) | D(22) | imm3(21-19) |
  // 000(18-16) | Vd(15-12) | 101000(11-6) | M(5) | 1(4) | Vm(3-0)
  DCHECK(CpuFeatures::IsSupported(NEON));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(0xFU*B28 | B25 | (dt & NeonDataTypeUMask) | B23 | d*B22 |
       (dt & NeonDataTypeSizeMask)*B19 | vd*B12 | 0xA*B8 | m*B5 | B4 | vm);
}
3120 
3121 
3122 // Pseudo instructions.
3123 void Assembler::nop(int type) {
3124  // ARMv6{K/T2} and v7 have an actual NOP instruction but it serializes
3125  // some of the CPU's pipeline and has to issue. Older ARM chips simply used
3126  // MOV Rx, Rx as NOP and it performs better even in newer CPUs.
3127  // We therefore use MOV Rx, Rx, even on newer CPUs, and use Rx to encode
3128  // a type.
3129  DCHECK(0 <= type && type <= 14); // mov pc, pc isn't a nop.
3130  emit(al | 13*B21 | type*B12 | type);
3131 }
3132 
3133 
3134 bool Assembler::IsMovT(Instr instr) {
3135  instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions
3136  ((kNumRegisters-1)*B12) | // mask out register
3137  EncodeMovwImmediate(0xFFFF)); // mask out immediate value
3138  return instr == kMovtPattern;
3139 }
3140 
3141 
3142 bool Assembler::IsMovW(Instr instr) {
3143  instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions
3144  ((kNumRegisters-1)*B12) | // mask out destination
3145  EncodeMovwImmediate(0xFFFF)); // mask out immediate value
3146  return instr == kMovwPattern;
3147 }
3148 
3149 
// Returns the fixed-bit movt pattern recognized by IsMovT.
Instr Assembler::GetMovTPattern() { return kMovtPattern; }
3151 
3152 
// Returns the fixed-bit movw pattern recognized by IsMovW.
Instr Assembler::GetMovWPattern() { return kMovwPattern; }
3154 
3155 
3156 Instr Assembler::EncodeMovwImmediate(uint32_t immediate) {
3157  DCHECK(immediate < 0x10000);
3158  return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
3159 }
3160 
3161 
3162 Instr Assembler::PatchMovwImmediate(Instr instruction, uint32_t immediate) {
3163  instruction &= ~EncodeMovwImmediate(0xffff);
3164  return instruction | EncodeMovwImmediate(immediate);
3165 }
3166 
3167 
3168 int Assembler::DecodeShiftImm(Instr instr) {
3169  int rotate = Instruction::RotateValue(instr) * 2;
3170  int immed8 = Instruction::Immed8Value(instr);
3171  return (immed8 >> rotate) | (immed8 << (32 - rotate));
3172 }
3173 
3174 
Instr Assembler::PatchShiftImm(Instr instr, int immed) {
  // Re-encodes |immed| as an addressing-mode-1 rotated immediate and splices
  // it into |instr|, replacing the previous rotate/immed8 fields.
  uint32_t rotate_imm = 0;
  uint32_t immed_8 = 0;
  // The caller must only pass values representable as a rotated 8-bit
  // immediate; fits_shifter computes the encoding.
  bool immed_fits = fits_shifter(immed, &rotate_imm, &immed_8, NULL);
  DCHECK(immed_fits);
  USE(immed_fits);
  return (instr & ~kOff12Mask) | (rotate_imm << 8) | immed_8;
}
3183 
3184 
3185 bool Assembler::IsNop(Instr instr, int type) {
3186  DCHECK(0 <= type && type <= 14); // mov pc, pc isn't a nop.
3187  // Check for mov rx, rx where x = type.
3188  return instr == (al | 13*B21 | type*B12 | type);
3189 }
3190 
3191 
// Returns true if |instr| matches the "mov rd, #imm" pattern (fixed bits
// only; register and immediate are masked out).
bool Assembler::IsMovImmed(Instr instr) {
  return (instr & kMovImmedMask) == kMovImmedPattern;
}
3195 
3196 
// Returns true if |instr| matches the "orr rd, rn, #imm" pattern (fixed bits
// only; registers and immediate are masked out).
bool Assembler::IsOrrImmed(Instr instr) {
  return (instr & kOrrImmedMask) == kOrrImmedPattern;
}
3200 
3201 
3202 // static
3203 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
3204  uint32_t dummy1;
3205  uint32_t dummy2;
3206  return fits_shifter(imm32, &dummy1, &dummy2, NULL);
3207 }
3208 
3209 
3210 bool Assembler::ImmediateFitsAddrMode2Instruction(int32_t imm32) {
3211  return is_uint12(abs(imm32));
3212 }
3213 
3214 
3215 // Debugging.
// Debugging.
// Records a JS return site: flushes any recorded source positions, makes
// sure there is buffer space, then writes a JS_RETURN reloc entry at the
// current pc.
void Assembler::RecordJSReturn() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}
3221 
3222 
// Records a debug break slot: flushes any recorded source positions, makes
// sure there is buffer space, then writes a DEBUG_BREAK_SLOT reloc entry at
// the current pc.
void Assembler::RecordDebugBreakSlot() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
}
3228 
3229 
3230 void Assembler::RecordComment(const char* msg) {
3231  if (FLAG_code_comments) {
3232  CheckBuffer();
3233  RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
3234  }
3235 }
3236 
3237 
// Records the size of an emitted constant pool as a CONST_POOL reloc entry.
void Assembler::RecordConstPool(int size) {
  // We only need this for debugger support, to correctly compute offsets in
  // the code.
  RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
}
3243 
3244 
// Grows the code buffer, copying the instructions (at the bottom) and the
// relocation info (growing down from the top), then rewrites every pointer
// that referenced the old buffer.
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size: double while below 1MB, then grow linearly by
  // 1MB per step.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // no overflow

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data. Instructions move by pc_delta; the reloc info, anchored
  // to the end of the buffer, moves by rc_delta.
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  MemMove(desc.buffer, buffer_, desc.instr_size);
  MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc relative pointing outside the code
  // buffer nor pc absolute pointing inside the code buffer, so there is no
  // need to relocate any emitted relocation entries.

  // Relocate pending relocation entries (constants not yet emitted into the
  // instruction stream).
  for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
    RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
    DCHECK(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION);
    if (rinfo.rmode() != RelocInfo::JS_RETURN) {
      rinfo.set_pc(rinfo.pc() + pc_delta);
    }
  }
  for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
    RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
    DCHECK(rinfo.rmode() == RelocInfo::NONE64);
    rinfo.set_pc(rinfo.pc() + pc_delta);
  }
  constant_pool_builder_.Relocate(pc_delta);
}
3298 
3299 
// Writes a single raw byte at the current pc.
void Assembler::db(uint8_t data) {
  // No relocation info should be pending while using db. db is used
  // to write pure data with no pointers and the constant pool should
  // be emitted before using db.
  DCHECK(num_pending_32_bit_reloc_info_ == 0);
  DCHECK(num_pending_64_bit_reloc_info_ == 0);
  CheckBuffer();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}
3310 
3311 
// Writes a raw 32-bit word at the current pc.
void Assembler::dd(uint32_t data) {
  // No relocation info should be pending while using dd. dd is used
  // to write pure data with no pointers and the constant pool should
  // be emitted before using dd.
  DCHECK(num_pending_32_bit_reloc_info_ == 0);
  DCHECK(num_pending_64_bit_reloc_info_ == 0);
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}
3322 
3323 
// Writes the entry address of |stub| as a raw 32-bit word at the current pc.
void Assembler::emit_code_stub_address(Code* stub) {
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) =
      reinterpret_cast<uint32_t>(stub->instruction_start());
  pc_ += sizeof(uint32_t);
}
3330 
3331 
3332 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
3333  RelocInfo rinfo(pc_, rmode, data, NULL);
3334  RecordRelocInfo(rinfo);
3335 }
3336 
3337 
3338 void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
3339  if (!RelocInfo::IsNone(rinfo.rmode())) {
3340  // Don't record external references unless the heap will be serialized.
3341  if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE &&
3342  !serializer_enabled() && !emit_debug_code()) {
3343  return;
3344  }
3345  DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
3346  if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
3347  RelocInfo reloc_info_with_ast_id(rinfo.pc(),
3348  rinfo.rmode(),
3349  RecordedAstId().ToInt(),
3350  NULL);
3351  ClearRecordedAstId();
3352  reloc_info_writer.Write(&reloc_info_with_ast_id);
3353  } else {
3354  reloc_info_writer.Write(&rinfo);
3355  }
3356  }
3357 }
3358 
3359 
// Queues a constant pool entry for |rinfo|. With out-of-line constant pools
// the entry goes to the builder; otherwise it is appended to the pending
// 32-bit or 64-bit array (64-bit for NONE64 data). Returns the layout
// section the entry was placed in.
ConstantPoolArray::LayoutSection Assembler::ConstantPoolAddEntry(
    const RelocInfo& rinfo) {
  if (FLAG_enable_ool_constant_pool) {
    return constant_pool_builder_.AddEntry(this, rinfo);
  } else {
    if (rinfo.rmode() == RelocInfo::NONE64) {
      DCHECK(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
      // Remember the pc of the first use so pool range checks can be made.
      if (num_pending_64_bit_reloc_info_ == 0) {
        first_const_pool_64_use_ = pc_offset();
      }
      pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
    } else {
      DCHECK(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
      if (num_pending_32_bit_reloc_info_ == 0) {
        first_const_pool_32_use_ = pc_offset();
      }
      pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
    }
    // Make sure the constant pool is not emitted in place of the next
    // instruction for which we just recorded relocation info.
    BlockConstPoolFor(1);
    return ConstantPoolArray::SMALL_SECTION;
  }
}
3384 
3385 
// Prevents constant pool emission within the next |instructions|
// instructions by extending no_const_pool_before_ (and the next buffer
// check) past that point.
void Assembler::BlockConstPoolFor(int instructions) {
  if (FLAG_enable_ool_constant_pool) {
    // Should be a no-op if using an out-of-line constant pool.
    DCHECK(num_pending_32_bit_reloc_info_ == 0);
    DCHECK(num_pending_64_bit_reloc_info_ == 0);
    return;
  }

  int pc_limit = pc_offset() + instructions * kInstrSize;
  if (no_const_pool_before_ < pc_limit) {
    // Max pool start (if we need a jump and an alignment).
#ifdef DEBUG
    // Verify that delaying the pool this long cannot push any pending entry
    // out of range of its load instruction.
    int start = pc_limit + kInstrSize + 2 * kPointerSize;
    DCHECK((num_pending_32_bit_reloc_info_ == 0) ||
           (start - first_const_pool_32_use_ +
            num_pending_64_bit_reloc_info_ * kDoubleSize < kMaxDistToIntPool));
    DCHECK((num_pending_64_bit_reloc_info_ == 0) ||
           (start - first_const_pool_64_use_ < kMaxDistToFPPool));
#endif
    no_const_pool_before_ = pc_limit;
  }

  if (next_buffer_check_ < no_const_pool_before_) {
    next_buffer_check_ = no_const_pool_before_;
  }
}
3412 
3413 
// Emits the pending constant pool into the instruction stream when forced,
// or when a pending entry is getting close to the range limit of its load
// instruction. Patches each pending pc-relative ldr/vldr with the final
// offset of its constant, sharing pool slots for duplicate values where
// safe.
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
  if (FLAG_enable_ool_constant_pool) {
    // Should be a no-op if using an out-of-line constant pool.
    DCHECK(num_pending_32_bit_reloc_info_ == 0);
    DCHECK(num_pending_64_bit_reloc_info_ == 0);
    return;
  }

  // Some short sequence of instruction mustn't be broken up by constant pool
  // emission, such sequences are protected by calls to BlockConstPoolFor and
  // BlockConstPoolScope.
  if (is_const_pool_blocked()) {
    // Something is wrong if emission is forced and blocked at the same time.
    DCHECK(!force_emit);
    return;
  }

  // There is nothing to do if there are no pending constant pool entries.
  if ((num_pending_32_bit_reloc_info_ == 0) &&
      (num_pending_64_bit_reloc_info_ == 0)) {
    // Calculate the offset of the next check.
    next_buffer_check_ = pc_offset() + kCheckPoolInterval;
    return;
  }

  // Check that the code buffer is large enough before emitting the constant
  // pool (include the jump over the pool and the constant pool marker and
  // the gap to the relocation information).
  int jump_instr = require_jump ? kInstrSize : 0;
  int size_up_to_marker = jump_instr + kInstrSize;
  int size_after_marker = num_pending_32_bit_reloc_info_ * kPointerSize;
  bool has_fp_values = (num_pending_64_bit_reloc_info_ > 0);
  bool require_64_bit_align = false;
  if (has_fp_values) {
    // 64-bit entries must be 64-bit aligned; a padding word may be needed
    // between the marker and the first entry.
    require_64_bit_align = (((uintptr_t)pc_ + size_up_to_marker) & 0x7);
    if (require_64_bit_align) {
      size_after_marker += kInstrSize;
    }
    size_after_marker += num_pending_64_bit_reloc_info_ * kDoubleSize;
  }

  int size = size_up_to_marker + size_after_marker;

  // We emit a constant pool when:
  //  * requested to do so by parameter force_emit (e.g. after each function).
  //  * the distance from the first instruction accessing the constant pool to
  //    any of the constant pool entries will exceed its limit the next
  //    time the pool is checked. This is overly restrictive, but we don't
  //    emit constant pool entries in-order so it's conservatively correct.
  //  * the instruction doesn't require a jump after itself to jump over the
  //    constant pool, and we're getting close to running out of range.
  if (!force_emit) {
    DCHECK((first_const_pool_32_use_ >= 0) || (first_const_pool_64_use_ >= 0));
    bool need_emit = false;
    if (has_fp_values) {
      // 64-bit entries are emitted before the 32-bit ones, so their distance
      // excludes the 32-bit portion of the pool.
      int dist64 = pc_offset() +
                   size -
                   num_pending_32_bit_reloc_info_ * kPointerSize -
                   first_const_pool_64_use_;
      if ((dist64 >= kMaxDistToFPPool - kCheckPoolInterval) ||
          (!require_jump && (dist64 >= kMaxDistToFPPool / 2))) {
        need_emit = true;
      }
    }
    int dist32 =
        pc_offset() + size - first_const_pool_32_use_;
    if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
        (!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
      need_emit = true;
    }
    if (!need_emit) return;
  }

  int needed_space = size + kGap;
  while (buffer_space() <= needed_space) GrowBuffer();

  {
    // Block recursive calls to CheckConstPool.
    BlockConstPoolScope block_const_pool(this);
    RecordComment("[ Constant Pool");
    RecordConstPool(size);

    // Emit jump over constant pool if necessary.
    Label after_pool;
    if (require_jump) {
      b(&after_pool);
    }

    // Put down constant pool marker "Undefined instruction".
    // The data size helps disassembly know what to print.
    emit(kConstantPoolMarker |
         EncodeConstantPoolLength(size_after_marker / kPointerSize));

    if (require_64_bit_align) {
      // Padding word so the 64-bit entries start 64-bit aligned.
      emit(kConstantPoolMarker);
    }

    // Emit 64-bit constant pool entries first: their range is smaller than
    // 32-bit entries.
    for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
      RelocInfo& rinfo = pending_64_bit_reloc_info_[i];

      DCHECK(!((uintptr_t)pc_ & 0x7));  // Check 64-bit alignment.

      Instr instr = instr_at(rinfo.pc());
      // Instruction to patch must be 'vldr rd, [pc, #offset]' with
      // offset == 0.
      DCHECK((IsVldrDPcImmediateOffset(instr) &&
              GetVldrDRegisterImmediateOffset(instr) == 0));

      int delta = pc_ - rinfo.pc() - kPcLoadDelta;
      DCHECK(is_uint10(delta));

      // Share the pool slot of an earlier entry holding the same raw bits.
      bool found = false;
      uint64_t value = rinfo.raw_data64();
      for (int j = 0; j < i; j++) {
        RelocInfo& rinfo2 = pending_64_bit_reloc_info_[j];
        if (value == rinfo2.raw_data64()) {
          found = true;
          DCHECK(rinfo2.rmode() == RelocInfo::NONE64);
          Instr instr2 = instr_at(rinfo2.pc());
          DCHECK(IsVldrDPcImmediateOffset(instr2));
          delta = GetVldrDRegisterImmediateOffset(instr2);
          delta += rinfo2.pc() - rinfo.pc();
          break;
        }
      }

      instr_at_put(rinfo.pc(), SetVldrDRegisterImmediateOffset(instr, delta));

      if (!found) {
        uint64_t uint_data = rinfo.raw_data64();
        emit(uint_data & 0xFFFFFFFF);
        emit(uint_data >> 32);
      }
    }

    // Emit 32-bit constant pool entries.
    for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
      RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
      DCHECK(rinfo.rmode() != RelocInfo::COMMENT &&
             rinfo.rmode() != RelocInfo::POSITION &&
             rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
             rinfo.rmode() != RelocInfo::CONST_POOL &&
             rinfo.rmode() != RelocInfo::NONE64);

      Instr instr = instr_at(rinfo.pc());

      // 64-bit loads shouldn't get here.
      DCHECK(!IsVldrDPcImmediateOffset(instr));

      if (IsLdrPcImmediateOffset(instr) &&
          GetLdrRegisterImmediateOffset(instr) == 0) {
        int delta = pc_ - rinfo.pc() - kPcLoadDelta;
        DCHECK(is_uint12(delta));
        // 0 is the smallest delta:
        //   ldr rd, [pc, #0]
        //   constant pool marker
        //   data

        // Only entries that will never be patched later may share a slot.
        bool found = false;
        if (!serializer_enabled() && rinfo.rmode() >= RelocInfo::CELL) {
          for (int j = 0; j < i; j++) {
            RelocInfo& rinfo2 = pending_32_bit_reloc_info_[j];

            if ((rinfo2.data() == rinfo.data()) &&
                (rinfo2.rmode() == rinfo.rmode())) {
              Instr instr2 = instr_at(rinfo2.pc());
              if (IsLdrPcImmediateOffset(instr2)) {
                delta = GetLdrRegisterImmediateOffset(instr2);
                delta += rinfo2.pc() - rinfo.pc();
                found = true;
                break;
              }
            }
          }
        }

        instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));

        if (!found) {
          emit(rinfo.data());
        }
      } else {
        // A movw/movt pair loads its value inline and needs no pool slot.
        DCHECK(IsMovW(instr));
      }
    }

    num_pending_32_bit_reloc_info_ = 0;
    num_pending_64_bit_reloc_info_ = 0;
    first_const_pool_32_use_ = -1;
    first_const_pool_64_use_ = -1;

    RecordComment("]");

    if (after_pool.is_linked()) {
      bind(&after_pool);
    }
  }

  // Since a constant pool was just emitted, move the check offset forward by
  // the standard interval.
  next_buffer_check_ = pc_offset() + kCheckPoolInterval;
}
3617 
3618 
3619 Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
3620  if (!FLAG_enable_ool_constant_pool) {
3621  return isolate->factory()->empty_constant_pool_array();
3622  }
3623  return constant_pool_builder_.New(isolate);
3624 }
3625 
3626 
// Fills |constant_pool| with the collected entries and patches the code to
// reference them (see ConstantPoolBuilder::Populate).
void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
  constant_pool_builder_.Populate(this, constant_pool);
}
3630 
3631 
// Starts with no entries, adding to the small section until it overflows
// (see AddEntry).
ConstantPoolBuilder::ConstantPoolBuilder()
    : entries_(), current_section_(ConstantPoolArray::SMALL_SECTION) {}
3634 
3635 
3636 bool ConstantPoolBuilder::IsEmpty() {
3637  return entries_.size() == 0;
3638 }
3639 
3640 
// Maps a relocation mode to the kind of slot its value occupies in a
// ConstantPoolArray. Branch order matters: NONE64 must be classified before
// the GC-reloc-mode test.
ConstantPoolArray::Type ConstantPoolBuilder::GetConstantPoolType(
    RelocInfo::Mode rmode) {
  if (rmode == RelocInfo::NONE64) {
    return ConstantPoolArray::INT64;
  } else if (!RelocInfo::IsGCRelocMode(rmode)) {
    return ConstantPoolArray::INT32;
  } else if (RelocInfo::IsCodeTarget(rmode)) {
    return ConstantPoolArray::CODE_PTR;
  } else {
    DCHECK(RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode));
    return ConstantPoolArray::HEAP_PTR;
  }
}
3654 
3655 
// Records a constant pool entry for |rinfo|, merging it with an existing
// identical entry when the value will never be patched. Returns the layout
// section the entry landed in, and flips the builder to the extended
// section once the small section can no longer be addressed by ldr/vldr
// immediate offsets.
ConstantPoolArray::LayoutSection ConstantPoolBuilder::AddEntry(
    Assembler* assm, const RelocInfo& rinfo) {
  RelocInfo::Mode rmode = rinfo.rmode();
  DCHECK(rmode != RelocInfo::COMMENT &&
         rmode != RelocInfo::POSITION &&
         rmode != RelocInfo::STATEMENT_POSITION &&
         rmode != RelocInfo::CONST_POOL);

  // Try to merge entries which won't be patched.
  int merged_index = -1;
  ConstantPoolArray::LayoutSection entry_section = current_section_;
  if (RelocInfo::IsNone(rmode) ||
      (!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) {
    // Linear scan over existing entries for one equal to rinfo.
    size_t i;
    std::vector<ConstantPoolEntry>::const_iterator it;
    for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
      if (RelocInfo::IsEqual(rinfo, it->rinfo_)) {
        // Merge with found entry.
        merged_index = i;
        entry_section = entries_[i].section_;
        break;
      }
    }
  }
  DCHECK(entry_section <= current_section_);
  entries_.push_back(ConstantPoolEntry(rinfo, entry_section, merged_index));

  if (merged_index == -1) {
    // Not merged, so update the appropriate count.
    number_of_entries_[entry_section].increment(GetConstantPoolType(rmode));
  }

  // Check if we still have room for another entry in the small section
  // given Arm's ldr and vldr immediate offset range.
  if (current_section_ == ConstantPoolArray::SMALL_SECTION &&
      !(is_uint12(ConstantPoolArray::SizeFor(*small_entries())) &&
        is_uint10(ConstantPoolArray::MaxInt64Offset(
            small_entries()->count_of(ConstantPoolArray::INT64))))) {
    current_section_ = ConstantPoolArray::EXTENDED_SECTION;
  }
  return entry_section;
}
3698 
3699 
3700 void ConstantPoolBuilder::Relocate(int pc_delta) {
3701  for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
3702  entry != entries_.end(); entry++) {
3703  DCHECK(entry->rinfo_.rmode() != RelocInfo::JS_RETURN);
3704  entry->rinfo_.set_pc(entry->rinfo_.pc() + pc_delta);
3705  }
3706 }
3707 
3708 
3709 Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) {
3710  if (IsEmpty()) {
3711  return isolate->factory()->empty_constant_pool_array();
3712  } else if (extended_entries()->is_empty()) {
3713  return isolate->factory()->NewConstantPoolArray(*small_entries());
3714  } else {
3715  DCHECK(current_section_ == ConstantPoolArray::EXTENDED_SECTION);
3716  return isolate->factory()->NewExtendedConstantPoolArray(
3717  *small_entries(), *extended_entries());
3718  }
3719 }
3720 
3721 
// Copies every collected constant into its slot in the allocated
// ConstantPoolArray and patches the corresponding load in the code with the
// slot's (tagged) offset: ldr/vldr for small-section entries, a
// movw/movt or mov/orr immediate sequence for extended-section entries.
void ConstantPoolBuilder::Populate(Assembler* assm,
                                   ConstantPoolArray* constant_pool) {
  DCHECK_EQ(extended_entries()->is_empty(),
            !constant_pool->is_extended_layout());
  DCHECK(small_entries()->equals(ConstantPoolArray::NumberOfEntries(
      constant_pool, ConstantPoolArray::SMALL_SECTION)));
  if (constant_pool->is_extended_layout()) {
    DCHECK(extended_entries()->equals(ConstantPoolArray::NumberOfEntries(
        constant_pool, ConstantPoolArray::EXTENDED_SECTION)));
  }

  // Set up initial offsets: for each section and entry type, the offset of
  // the next free slot of that type within the constant pool array.
  int offsets[ConstantPoolArray::NUMBER_OF_LAYOUT_SECTIONS]
             [ConstantPoolArray::NUMBER_OF_TYPES];
  for (int section = 0; section <= constant_pool->final_section(); section++) {
    int section_start = (section == ConstantPoolArray::EXTENDED_SECTION)
                            ? small_entries()->total_count()
                            : 0;
    for (int i = 0; i < ConstantPoolArray::NUMBER_OF_TYPES; i++) {
      ConstantPoolArray::Type type = static_cast<ConstantPoolArray::Type>(i);
      if (number_of_entries_[section].count_of(type) != 0) {
        offsets[section][type] = constant_pool->OffsetOfElementAt(
            number_of_entries_[section].base_of(type) + section_start);
      }
    }
  }

  for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
       entry != entries_.end(); entry++) {
    RelocInfo rinfo = entry->rinfo_;
    RelocInfo::Mode rmode = entry->rinfo_.rmode();
    ConstantPoolArray::Type type = GetConstantPoolType(rmode);

    // Update constant pool if necessary and get the entry's offset.
    int offset;
    if (entry->merged_index_ == -1) {
      offset = offsets[entry->section_][type];
      offsets[entry->section_][type] += ConstantPoolArray::entry_size(type);
      if (type == ConstantPoolArray::INT64) {
        constant_pool->set_at_offset(offset, rinfo.data64());
      } else if (type == ConstantPoolArray::INT32) {
        constant_pool->set_at_offset(offset,
                                     static_cast<int32_t>(rinfo.data()));
      } else if (type == ConstantPoolArray::CODE_PTR) {
        constant_pool->set_at_offset(offset,
                                     reinterpret_cast<Address>(rinfo.data()));
      } else {
        DCHECK(type == ConstantPoolArray::HEAP_PTR);
        constant_pool->set_at_offset(offset,
                                     reinterpret_cast<Object*>(rinfo.data()));
      }
      // Loads are patched with a tagged offset.
      offset -= kHeapObjectTag;
      entry->merged_index_ = offset;  // Stash offset for merged entries.
    } else {
      // Reuse the offset stashed by the (earlier) entry this one was merged
      // with.
      DCHECK(entry->merged_index_ < (entry - entries_.begin()));
      offset = entries_[entry->merged_index_].merged_index_;
    }

    // Patch vldr/ldr instruction with correct offset.
    Instr instr = assm->instr_at(rinfo.pc());
    if (entry->section_ == ConstantPoolArray::EXTENDED_SECTION) {
      if (CpuFeatures::IsSupported(ARMv7)) {
        // Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0].
        Instr next_instr = assm->instr_at(rinfo.pc() + Assembler::kInstrSize);
        DCHECK((Assembler::IsMovW(instr) &&
                Instruction::ImmedMovwMovtValue(instr) == 0));
        DCHECK((Assembler::IsMovT(next_instr) &&
                Instruction::ImmedMovwMovtValue(next_instr) == 0));
        assm->instr_at_put(
            rinfo.pc(), Assembler::PatchMovwImmediate(instr, offset & 0xffff));
        assm->instr_at_put(
            rinfo.pc() + Assembler::kInstrSize,
            Assembler::PatchMovwImmediate(next_instr, offset >> 16));
      } else {
        // Instructions to patch must be 'mov rd, [#0]' and 'orr rd, rd, [#0].
        Instr instr_2 = assm->instr_at(rinfo.pc() + Assembler::kInstrSize);
        Instr instr_3 = assm->instr_at(rinfo.pc() + 2 * Assembler::kInstrSize);
        Instr instr_4 = assm->instr_at(rinfo.pc() + 3 * Assembler::kInstrSize);
        DCHECK((Assembler::IsMovImmed(instr) &&
                Instruction::Immed8Value(instr) == 0));
        DCHECK((Assembler::IsOrrImmed(instr_2) &&
                Instruction::Immed8Value(instr_2) == 0) &&
               Assembler::GetRn(instr_2).is(Assembler::GetRd(instr_2)));
        DCHECK((Assembler::IsOrrImmed(instr_3) &&
                Instruction::Immed8Value(instr_3) == 0) &&
               Assembler::GetRn(instr_3).is(Assembler::GetRd(instr_3)));
        DCHECK((Assembler::IsOrrImmed(instr_4) &&
                Instruction::Immed8Value(instr_4) == 0) &&
               Assembler::GetRn(instr_4).is(Assembler::GetRd(instr_4)));
        // The 32-bit offset is distributed one byte per instruction across
        // the mov/orr sequence.
        assm->instr_at_put(
            rinfo.pc(), Assembler::PatchShiftImm(instr, (offset & kImm8Mask)));
        assm->instr_at_put(
            rinfo.pc() + Assembler::kInstrSize,
            Assembler::PatchShiftImm(instr_2, (offset & (kImm8Mask << 8))));
        assm->instr_at_put(
            rinfo.pc() + 2 * Assembler::kInstrSize,
            Assembler::PatchShiftImm(instr_3, (offset & (kImm8Mask << 16))));
        assm->instr_at_put(
            rinfo.pc() + 3 * Assembler::kInstrSize,
            Assembler::PatchShiftImm(instr_4, (offset & (kImm8Mask << 24))));
      }
    } else if (type == ConstantPoolArray::INT64) {
      // Instruction to patch must be 'vldr rd, [pp, #0]'.
      DCHECK((Assembler::IsVldrDPpImmediateOffset(instr) &&
              Assembler::GetVldrDRegisterImmediateOffset(instr) == 0));
      DCHECK(is_uint10(offset));
      assm->instr_at_put(rinfo.pc(), Assembler::SetVldrDRegisterImmediateOffset(
                                         instr, offset));
    } else {
      // Instruction to patch must be 'ldr rd, [pp, #0]'.
      DCHECK((Assembler::IsLdrPpImmediateOffset(instr) &&
              Assembler::GetLdrRegisterImmediateOffset(instr) == 0));
      DCHECK(is_uint12(offset));
      assm->instr_at_put(
          rinfo.pc(), Assembler::SetLdrRegisterImmediateOffset(instr, offset));
    }
  }
}
3840 
3841 
3842 } } // namespace v8::internal
3843 
3844 #endif // V8_TARGET_ARCH_ARM
#define kDoubleRegZero
#define kScratchDoubleReg
static bool ArmUsingHardFloat()
static const int kInstrSize
Assembler(Isolate *isolate, void *buffer, int buffer_size)
static void FlushICache(void *start, size_t size)
static bool IsSupported(CpuFeature f)
Definition: assembler.h:184
static unsigned cache_line_size_
Definition: assembler.h:206
static unsigned supported_
Definition: assembler.h:205
static void PrintFeatures()
static void ProbeImpl(bool cross_compile)
Operand(Register reg, Shift shift=LSL, unsigned shift_amount=0)
static const int kApplyMask
Definition: assembler.h:591
void PatchCode(byte *instructions, int instruction_count)
byte * pc() const
Definition: assembler.h:457
void PatchCodeWithCall(Address target, int guard_bytes)
static const char * Name(int reg, bool is_double)
enable harmony numeric enable harmony object literal extensions Optimize object size
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_BOOL(enable_always_align_csp
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for ARMv7(ARM only)") DEFINE_BOOL(enable_32dregs
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be NULL
#define UNREACHABLE()
Definition: logging.h:30
#define DCHECK_LE(v1, v2)
Definition: logging.h:210
#define CHECK(condition)
Definition: logging.h:36
#define FATAL(msg)
Definition: logging.h:26
#define CHECK_GT(a, b)
Definition: logging.h:177
#define UNIMPLEMENTED()
Definition: logging.h:28
#define DCHECK(condition)
Definition: logging.h:205
#define DCHECK_EQ(v1, v2)
Definition: logging.h:206
void USE(T)
Definition: macros.h:322
unsigned short uint16_t
Definition: unicode.cc:23
int int32_t
Definition: unicode.cc:24
bool IsPowerOfTwo32(uint32_t value)
Definition: bits.h:77
Matcher< Node * > IsBranch(const Matcher< Node * > &value_matcher, const Matcher< Node * > &control_matcher)
void DeleteArray(T *array)
Definition: allocation.h:68
const int kPointerSize
Definition: globals.h:129
@ MOVW_MOVT_IMMEDIATE_LOADS
Definition: globals.h:622
@ UNALIGNED_ACCESSES
Definition: globals.h:621
const int32_t kDefaultStopCode
int EncodeConstantPoolLength(int length)
Definition: constants-arm.h:21
TypeImpl< ZoneTypeConfig > Type
const VmovIndex VmovIndexHi
const Register r0
const uint32_t kMaxStopCode
const int kRegister_pc_Code
Definition: assembler-arm.h:91
const Register ip
const int kDoubleSize
Definition: globals.h:127
DwVfpRegister DoubleRegister
void MemMove(void *dest, const void *src, size_t size)
Definition: utils.h:353
kSerializedDataOffset Object
Definition: objects-inl.h:5322
const Register sp
uint32_t SRegisterFieldMask
Handle< T > handle(T *t, Isolate *isolate)
Definition: handles.h:146
const Register pc
const int kRegister_r8_Code
Definition: assembler-arm.h:84
Condition NegateCondition(Condition cond)
Definition: constants-arm.h:86
uint32_t RegList
Definition: frames.h:18
const Register lr
byte * Address
Definition: globals.h:101
void PrintF(const char *format,...)
Definition: utils.cc:80
const VmovIndex VmovIndexLo
const int kHeapObjectTag
Definition: v8.h:5737
const Register no_reg
const int kRegister_fp_Code
Definition: assembler-arm.h:87
const Register pp
const Instr kPushRegPattern
const Instr kPopRegPattern
const int kConstantPoolMarker
Definition: constants-arm.h:19
PerThreadAssertScopeDebugOnly< DEFERRED_HANDLE_DEREFERENCE_ASSERT, true > AllowDeferredHandleDereference
Definition: assert-scope.h:130
const int kNumRegisters
Definition: constants-arm.h:34
const int kRegister_sp_Code
Definition: assembler-arm.h:89
Debugger support for the V8 JavaScript engine.
Definition: accessors.cc:20
@ None
Definition: v8.h:2211
static const uint16_t * Align(const uint16_t *chars)
Definition: api.cc:4266
const int MB
Definition: d8.cc:164
#define P(name, number_of_args, result_size)
Definition: runtime.cc:53
#define I(name, number_of_args, result_size)
Definition: runtime.cc:9248
#define U(name)
Definition: runtime.cc:9020
static const int kNumReservedRegisters
static const char * AllocationIndexToString(int index)
#define S(x)
Definition: version.cc:55