assembler-mips.cc
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.


#include "src/v8.h"

#if V8_TARGET_ARCH_MIPS

#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/mips/assembler-mips-inl.h"
#include "src/serialize.h"

namespace v8 {
namespace internal {

// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbol CAN_USE_FPU_INSTRUCTIONS
// can be defined to enable FPU instructions when building the
// snapshot.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
#ifdef CAN_USE_FPU_INSTRUCTIONS
  answer |= 1u << FPU;
#endif  // def CAN_USE_FPU_INSTRUCTIONS

  // If the compiler is allowed to use FPU then we can use FPU too in our code
  // generation even when generating snapshots. This won't work for cross
  // compilation.
#if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0
  answer |= 1u << FPU;
#endif

  return answer;
}
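
// For instance (illustrative): building with CAN_USE_FPU_INSTRUCTIONS defined
// makes (CpuFeaturesImpliedByCompiler() & (1u << FPU)) != 0, so ProbeImpl()
// below starts out with FPU already marked as supported.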


const char* DoubleRegister::AllocationIndexToString(int index) {
  DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
  const char* const names[] = {
    "f0",
    "f2",
    "f4",
    "f6",
    "f8",
    "f10",
    "f12",
    "f14",
    "f16",
    "f18",
    "f20",
    "f22",
    "f24",
    "f26"
  };
  return names[index];
}


void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

  // If the compiler is allowed to use fpu then we can use fpu too in our
  // code generation.
#ifndef __mips__
  // For the simulator build, use FPU.
  supported_ |= 1u << FPU;
#if defined(_MIPS_ARCH_MIPS32R6)
  // FP64 mode is implied on r6.
  supported_ |= 1u << FP64FPU;
#endif
#if defined(FPU_MODE_FP64)
  supported_ |= 1u << FP64FPU;
#endif
#else
  // Probe for additional features at runtime.
  base::CPU cpu;
  if (cpu.has_fpu()) supported_ |= 1u << FPU;
#if defined(FPU_MODE_FPXX)
  if (cpu.is_fp64_mode()) supported_ |= 1u << FP64FPU;
#elif defined(FPU_MODE_FP64)
  supported_ |= 1u << FP64FPU;
#endif
#if defined(_MIPS_ARCH_MIPS32RX)
  if (cpu.architecture() == 6) {
    supported_ |= 1u << MIPSr6;
  } else if (cpu.architecture() == 2) {
    supported_ |= 1u << MIPSr1;
    supported_ |= 1u << MIPSr2;
  } else {
    supported_ |= 1u << MIPSr1;
  }
#endif
#endif
}


void CpuFeatures::PrintTarget() { }
void CpuFeatures::PrintFeatures() { }


int ToNumber(Register reg) {
  DCHECK(reg.is_valid());
  const int kNumbers[] = {
    0,    // zero_reg
    1,    // at
    2,    // v0
    3,    // v1
    4,    // a0
    5,    // a1
    6,    // a2
    7,    // a3
    8,    // t0
    9,    // t1
    10,   // t2
    11,   // t3
    12,   // t4
    13,   // t5
    14,   // t6
    15,   // t7
    16,   // s0
    17,   // s1
    18,   // s2
    19,   // s3
    20,   // s4
    21,   // s5
    22,   // s6
    23,   // s7
    24,   // t8
    25,   // t9
    26,   // k0
    27,   // k1
    28,   // gp
    29,   // sp
    30,   // fp
    31,   // ra
  };
  return kNumbers[reg.code()];
}


Register ToRegister(int num) {
  DCHECK(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {
    zero_reg,
    at,
    v0, v1,
    a0, a1, a2, a3,
    t0, t1, t2, t3, t4, t5, t6, t7,
    s0, s1, s2, s3, s4, s5, s6, s7,
    t8, t9,
    k0, k1,
    gp,
    sp,
    fp,
    ra
  };
  return kRegisters[num];
}


// -----------------------------------------------------------------------------
// Implementation of RelocInfo.

const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
                                  1 << RelocInfo::INTERNAL_REFERENCE;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded. Being
  // specially coded on MIPS means that it is a lui/ori instruction, and that is
  // always the case inside code objects.
  return true;
}


bool RelocInfo::IsInConstantPool() {
  return false;
}


// Patch the code at the current address with the supplied instructions.
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }

  // Indicate that code has changed.
  CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
}


// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  UNIMPLEMENTED_MIPS();
}


// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand.
// See assembler-mips-inl.h for inlined constructors.

Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // No relocation needed.
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE32;
  }
}


MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
  offset_ = offset;
}


MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
                       OffsetAddend offset_addend) : Operand(rm) {
  offset_ = unit * multiplier + offset_addend;
}


// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

static const int kNegOffset = 0x00008000;
// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
const Instr kPopInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
      | (kRegister_sp_Code << kRtShift)
      | (kPointerSize & kImm16Mask);  // NOLINT
// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
const Instr kPushInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
      | (kRegister_sp_Code << kRtShift)
      | (-kPointerSize & kImm16Mask);  // NOLINT
// sw(r, MemOperand(sp, 0))
const Instr kPushRegPattern = SW | (kRegister_sp_Code << kRsShift)
      | (0 & kImm16Mask);  // NOLINT
// lw(r, MemOperand(sp, 0))
const Instr kPopRegPattern = LW | (kRegister_sp_Code << kRsShift)
      | (0 & kImm16Mask);  // NOLINT

const Instr kLwRegFpOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
      | (0 & kImm16Mask);  // NOLINT

const Instr kSwRegFpOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
      | (0 & kImm16Mask);  // NOLINT

const Instr kLwRegFpNegOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
      | (kNegOffset & kImm16Mask);  // NOLINT

const Instr kSwRegFpNegOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
      | (kNegOffset & kImm16Mask);  // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xffe00000;
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
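
// Illustrative example: V8's Opcode enum values are pre-shifted into bits
// 31..26, and sp is register 29, so kPushRegPattern above works out to
//   SW | (29 << kRsShift) == 0xafa00000,
// and IsPush() below matches any 0xafa00000 | (rt << kRtShift), i.e.
// sw(rt, MemOperand(sp, 0)).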

Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      positions_recorder_(this) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

  last_trampoline_pool_end_ = 0;
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  // We leave space (16 * kTrampolineSlotsSize)
  // for BlockTrampolinePoolScope buffer.
  next_buffer_check_ = FLAG_force_long_branches
      ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;

  trampoline_emitted_ = FLAG_force_long_branches;
  unbound_labels_count_ = 0;
  block_buffer_growth_ = false;

  ClearRecordedAstId();
}


void Assembler::GetCode(CodeDesc* desc) {
  DCHECK(pc_ <= reloc_info_writer.pos());  // No overlap.
  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
  desc->origin = this;
}


void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}
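
// For example: at pc_offset() == 4, Align(8) emits one nop so the next
// instruction starts at offset 8; at an already aligned offset it emits
// nothing.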


void Assembler::CodeTargetAlign() {
  // No advantage to aligning branch/call targets to more than
  // single instruction, that I am aware of.
  Align(4);
}


Register Assembler::GetRtReg(Instr instr) {
  Register rt;
  rt.code_ = (instr & kRtFieldMask) >> kRtShift;
  return rt;
}


Register Assembler::GetRsReg(Instr instr) {
  Register rs;
  rs.code_ = (instr & kRsFieldMask) >> kRsShift;
  return rs;
}


Register Assembler::GetRdReg(Instr instr) {
  Register rd;
  rd.code_ = (instr & kRdFieldMask) >> kRdShift;
  return rd;
}


uint32_t Assembler::GetRt(Instr instr) {
  return (instr & kRtFieldMask) >> kRtShift;
}


uint32_t Assembler::GetRtField(Instr instr) {
  return instr & kRtFieldMask;
}


uint32_t Assembler::GetRs(Instr instr) {
  return (instr & kRsFieldMask) >> kRsShift;
}


uint32_t Assembler::GetRsField(Instr instr) {
  return instr & kRsFieldMask;
}


uint32_t Assembler::GetRd(Instr instr) {
  return (instr & kRdFieldMask) >> kRdShift;
}


uint32_t Assembler::GetRdField(Instr instr) {
  return instr & kRdFieldMask;
}


uint32_t Assembler::GetSa(Instr instr) {
  return (instr & kSaFieldMask) >> kSaShift;
}


uint32_t Assembler::GetSaField(Instr instr) {
  return instr & kSaFieldMask;
}


uint32_t Assembler::GetOpcodeField(Instr instr) {
  return instr & kOpcodeMask;
}


uint32_t Assembler::GetFunction(Instr instr) {
  return (instr & kFunctionFieldMask) >> kFunctionShift;
}


uint32_t Assembler::GetFunctionField(Instr instr) {
  return instr & kFunctionFieldMask;
}


uint32_t Assembler::GetImmediate16(Instr instr) {
  return instr & kImm16Mask;
}


uint32_t Assembler::GetLabelConst(Instr instr) {
  return instr & ~kImm16Mask;
}


bool Assembler::IsPop(Instr instr) {
  return (instr & ~kRtMask) == kPopRegPattern;
}


bool Assembler::IsPush(Instr instr) {
  return (instr & ~kRtMask) == kPushRegPattern;
}


bool Assembler::IsSwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
}


bool Assembler::IsLwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
}


bool Assembler::IsSwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kSwRegFpNegOffsetPattern);
}


bool Assembler::IsLwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kLwRegFpNegOffsetPattern);
}


// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.

// The link chain is terminated by a value in the instruction of -1,
// which is an otherwise illegal value (branch -1 is inf loop).
// The instruction 16-bit offset field addresses 32-bit words, but in
// code it is converted to an 18-bit value addressing bytes, hence the
// -4 value.

const int kEndOfChain = -4;
// Determines the end of the Jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;
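
// Worked example (illustrative): a link field of imm16 == -1 (0xffff) is
// sign-extended and scaled to bytes by target_at() below as
//   ((0xffff << 16) >> 14) == -4 == kEndOfChain,
// which is how the terminator described above appears inside an instruction.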


bool Assembler::IsBranch(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  // Checks if the instruction is a branch.
  return opcode == BEQ ||
      opcode == BNE ||
      opcode == BLEZ ||
      opcode == BGTZ ||
      opcode == BEQL ||
      opcode == BNEL ||
      opcode == BLEZL ||
      opcode == BGTZL ||
      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
      (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
      (opcode == COP1 && rs_field == BC1EQZ) ||
      (opcode == COP1 && rs_field == BC1NEZ);
}


bool Assembler::IsEmittedConstant(Instr instr) {
  uint32_t label_constant = GetLabelConst(instr);
  return label_constant == 0;  // Emitted label const in reg-exp engine.
}


bool Assembler::IsBeq(Instr instr) {
  return GetOpcodeField(instr) == BEQ;
}


bool Assembler::IsBne(Instr instr) {
  return GetOpcodeField(instr) == BNE;
}


bool Assembler::IsJump(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rd_field = GetRdField(instr);
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is a jump.
  return opcode == J || opcode == JAL ||
      (opcode == SPECIAL && rt_field == 0 &&
      ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
}


bool Assembler::IsJ(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a jump.
  return opcode == J;
}


bool Assembler::IsJal(Instr instr) {
  return GetOpcodeField(instr) == JAL;
}


bool Assembler::IsJr(Instr instr) {
  if (!IsMipsArchVariant(kMips32r6)) {
    return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
  } else {
    // On r6, JR is encoded as JALR with rd == zero_reg.
    return GetOpcodeField(instr) == SPECIAL &&
        GetRdField(instr) == 0 && GetFunctionField(instr) == JALR;
  }
}


bool Assembler::IsJalr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL &&
         GetRdField(instr) != 0 && GetFunctionField(instr) == JALR;
}


bool Assembler::IsLui(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a load upper immediate.
  return opcode == LUI;
}


bool Assembler::IsOri(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is an or immediate.
  return opcode == ORI;
}


bool Assembler::IsNop(Instr instr, unsigned int type) {
  // See Assembler::nop(type).
  DCHECK(type < 32);
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t function = GetFunctionField(instr);
  uint32_t rt = GetRt(instr);
  uint32_t rd = GetRd(instr);
  uint32_t sa = GetSa(instr);

  // Traditional mips nop == sll(zero_reg, zero_reg, 0)
  // When marking non-zero type, use sll(zero_reg, at, type)
  // to avoid use of mips ssnop and ehb special encodings
  // of the sll instruction.

  Register nop_rt_reg = (type == 0) ? zero_reg : at;
  bool ret = (opcode == SPECIAL && function == SLL &&
              rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
              rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
              sa == type);

  return ret;
}
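
// Concretely (illustrative): the plain nop is the all-zero word
// sll(zero_reg, zero_reg, 0) == 0x00000000, while a type-1 marker nop,
// sll(zero_reg, at, 1), encodes as (1 << kRtShift) | (1 << kSaShift)
// == 0x00010040.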


int32_t Assembler::GetBranchOffset(Instr instr) {
  DCHECK(IsBranch(instr));
  return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
}
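
// E.g. an imm16 field of 0x8000 reads back as int16_t -32768 and scales to
// the byte offset -131072; the int16_t cast provides the sign extension.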


bool Assembler::IsLw(Instr instr) {
  return ((instr & kOpcodeMask) == LW);
}


int16_t Assembler::GetLwOffset(Instr instr) {
  DCHECK(IsLw(instr));
  return ((instr & kImm16Mask));
}


Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
  DCHECK(IsLw(instr));

  // We actually create a new lw instruction based on the original one.
  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
      | (offset & kImm16Mask);

  return temp_instr;
}


bool Assembler::IsSw(Instr instr) {
  return ((instr & kOpcodeMask) == SW);
}


Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
  DCHECK(IsSw(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAddImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIU);
}


Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
  DCHECK(IsAddImmediate(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAndImmediate(Instr instr) {
  return GetOpcodeField(instr) == ANDI;
}


int Assembler::target_at(int32_t pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    // Emitted label constant, not part of a branch.
    if (instr == 0) {
      return kEndOfChain;
    } else {
      int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
      return (imm18 + pos);
    }
  }
  // Check we have a branch or jump instruction.
  DCHECK(IsBranch(instr) || IsJ(instr) || IsLui(instr));
  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  if (IsBranch(instr)) {
    int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;

    if (imm18 == kEndOfChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      return pos + kBranchPCOffset + imm18;
    }
  } else if (IsLui(instr)) {
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    DCHECK(IsOri(instr_ori));
    int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
    imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));

    if (imm == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
      int32_t delta = instr_address - imm;
      DCHECK(pos > delta);
      return pos - delta;
    }
  } else {
    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    if (imm28 == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
      instr_address &= kImm28Mask;
      int32_t delta = instr_address - imm28;
      DCHECK(pos > delta);
      return pos - delta;
    }
  }
}


void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }

  DCHECK(IsBranch(instr) || IsJ(instr) || IsLui(instr));
  if (IsBranch(instr)) {
    int32_t imm18 = target_pos - (pos + kBranchPCOffset);
    DCHECK((imm18 & 3) == 0);

    instr &= ~kImm16Mask;
    int32_t imm16 = imm18 >> 2;
    DCHECK(is_int16(imm16));

    instr_at_put(pos, instr | (imm16 & kImm16Mask));
  } else if (IsLui(instr)) {
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    DCHECK(IsOri(instr_ori));
    uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
    DCHECK((imm & 3) == 0);

    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;

    instr_at_put(pos + 0 * Assembler::kInstrSize,
                 instr_lui | ((imm & kHiMask) >> kLuiShift));
    instr_at_put(pos + 1 * Assembler::kInstrSize,
                 instr_ori | (imm & kImm16Mask));
  } else {
    uint32_t imm28 = reinterpret_cast<uint32_t>(buffer_) + target_pos;
    imm28 &= kImm28Mask;
    DCHECK((imm28 & 3) == 0);

    instr &= ~kImm26Mask;
    uint32_t imm26 = imm28 >> 2;
    DCHECK(is_uint26(imm26));

    instr_at_put(pos, instr | (imm26 & kImm26Mask));
  }
}
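
// For instance (illustrative): patching a lui/ori pair that targets at to
// the absolute address 0x12345678 rewrites the pair to
//   lui(at, 0x1234);      // (imm & kHiMask) >> kLuiShift
//   ori(at, at, 0x5678);  // imm & kImm16Mask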


void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm16Mask) == 0) {
        PrintF("value\n");
      } else {
        PrintF("%d\n", instr);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}


void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
  int32_t trampoline_pos = kInvalidSlotPos;
  if (L->is_linked() && !trampoline_emitted_) {
    unbound_labels_count_--;
    next_buffer_check_ += kTrampolineSlotsSize;
  }

  while (L->is_linked()) {
    int32_t fixup_pos = L->pos();
    int32_t dist = pos - fixup_pos;
    next(L);  // Call next before overwriting link with target at fixup_pos.
    Instr instr = instr_at(fixup_pos);
    if (IsBranch(instr)) {
      if (dist > kMaxBranchOffset) {
        if (trampoline_pos == kInvalidSlotPos) {
          trampoline_pos = get_trampoline_entry(fixup_pos);
          CHECK(trampoline_pos != kInvalidSlotPos);
        }
        DCHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
        target_at_put(fixup_pos, trampoline_pos);
        fixup_pos = trampoline_pos;
        dist = pos - fixup_pos;
      }
      target_at_put(fixup_pos, pos);
    } else {
      DCHECK(IsJ(instr) || IsLui(instr) || IsEmittedConstant(instr));
      target_at_put(fixup_pos, pos);
    }
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}


void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // Label can only be bound once.
  bind_to(L, pc_offset());
}


void Assembler::next(Label* L) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos());
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    DCHECK(link >= 0);
    L->link_to(link);
  }
}


bool Assembler::is_near(Label* L) {
  if (L->is_bound()) {
    return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize);
  }
  return false;
}


// We have to use a temporary register for things that can be relocated even
// if they can be encoded in the 16 bits of MIPS's immediate-offset
// instruction field. There is no guarantee that the relocated location can
// be similarly encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
  return !RelocInfo::IsNone(rmode);
}

void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 Register rd,
                                 uint16_t sa,
                                 SecondaryField func) {
  DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 uint16_t msb,
                                 uint16_t lsb,
                                 SecondaryField func) {
  DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (msb << kRdShift) | (lsb << kSaShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
      | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 FPURegister fr,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid());
  Instr instr = opcode | fmt | (rt.code() << kRtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPUControlRegister fs,
                                 SecondaryField func) {
  DCHECK(fs.is_valid() && rt.is_valid());
  Instr instr =
      opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
  emit(instr);
}


// Instructions with immediate value.
// Registers are in the order of the instruction encoding, from left to right.
void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  Register rt,
                                  int32_t j) {
  DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (j & kImm16Mask);
  emit(instr);
}


void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  SecondaryField SF,
                                  int32_t j) {
  DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
  emit(instr);
}


void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  FPURegister ft,
                                  int32_t j) {
  DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
      | (j & kImm16Mask);
  emit(instr);
}
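
// Worked example (illustrative): addiu(sp, sp, -4), i.e.
// GenInstrImmediate(ADDIU, sp, sp, -4), assembles to
//   ADDIU | (29 << kRsShift) | (29 << kRtShift) | (-4 & kImm16Mask)
// == 0x27bdfffc, the classic MIPS stack-pointer pre-decrement.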


void Assembler::GenInstrJump(Opcode opcode,
                             uint32_t address) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(is_uint26(address));
  Instr instr = opcode | address;
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// Returns the next free trampoline entry.
int32_t Assembler::get_trampoline_entry(int32_t pos) {
  int32_t trampoline_entry = kInvalidSlotPos;

  if (!internal_trampoline_exception_) {
    if (trampoline_.start() > pos) {
      trampoline_entry = trampoline_.take_slot();
    }

    if (kInvalidSlotPos == trampoline_entry) {
      internal_trampoline_exception_ = true;
    }
  }
  return trampoline_entry;
}


uint32_t Assembler::jump_address(Label* L) {
  int32_t target_pos;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      return kEndOfJumpChain;
    }
  }

  uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
  DCHECK((imm & 3) == 0);

  return imm;
}


int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int32_t target_pos;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
  DCHECK((offset & 3) == 0);
  DCHECK(is_int16(offset >> 2));

  return offset;
}
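
// E.g. (illustrative): for a label bound 3 instructions past the branch,
// offset == 12 - kBranchPCOffset == 8 bytes, which the branch emitters
// encode as imm16 == 8 >> 2 == 2 (branch offsets are relative to the
// delay-slot pc).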


int32_t Assembler::branch_offset_compact(Label* L,
                                         bool jump_elimination_allowed) {
  int32_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - pc_offset();
  DCHECK((offset & 3) == 0);
  DCHECK(is_int16(offset >> 2));

  return offset;
}


int32_t Assembler::branch_offset21(Label* L, bool jump_elimination_allowed) {
  int32_t target_pos;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
  DCHECK((offset & 3) == 0);
  DCHECK(((offset >> 2) & 0xFFE00000) == 0);  // Offset is 21 bits wide.

  return offset;
}


int32_t Assembler::branch_offset21_compact(Label* L,
                                           bool jump_elimination_allowed) {
  int32_t target_pos;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - pc_offset();
  DCHECK((offset & 3) == 0);
  DCHECK(((offset >> 2) & 0xFFE00000) == 0);  // Offset is 21 bits wide.

  return offset;
}


void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      int32_t imm18 = target_pos - at_offset;
      DCHECK((imm18 & 3) == 0);
      int32_t imm16 = imm18 >> 2;
      DCHECK(is_int16(imm16));
      instr_at_put(at_offset, (imm16 & kImm16Mask));
    } else {
      target_pos = kEndOfChain;
      instr_at_put(at_offset, 0);
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
    }
    L->link_to(at_offset);
  }
}


//------- Branch and jump instructions --------

void Assembler::b(int16_t offset) {
  beq(zero_reg, zero_reg, offset);
}


void Assembler::bal(int16_t offset) {
  positions_recorder()->WriteRecordedPositions();
  bgezal(zero_reg, offset);
}


void Assembler::beq(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BEQ, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgezc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZL, rt, rt, offset);
}


void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZ, rs, rt, offset);
}


void Assembler::bgec(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZL, rs, rt, offset);
}


void Assembler::bgezal(Register rs, int16_t offset) {
  DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgtz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgtzc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZL, zero_reg, rt, offset);
}


void Assembler::blez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::blezc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZL, zero_reg, rt, offset);
}


void Assembler::bltzc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZL, rt, rt, offset);
}


void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZ, rs, rt, offset);
}


void Assembler::bltc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZL, rs, rt, offset);
}


void Assembler::bltz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bltzal(Register rs, int16_t offset) {
  DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bne(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BNE, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bovc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(rs.code() >= rt.code());
  GenInstrImmediate(ADDI, rs, rt, offset);
}


void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(rs.code() >= rt.code());
  GenInstrImmediate(DADDI, rs, rt, offset);
}


void Assembler::blezalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZ, zero_reg, rt, offset);
}


void Assembler::bgezalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZ, rt, rt, offset);
}


void Assembler::bgezall(Register rs, int16_t offset) {
  DCHECK(!IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
}


void Assembler::bltzalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZ, rt, rt, offset);
}


void Assembler::bgtzalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZ, zero_reg, rt, offset);
}


void Assembler::beqzalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(ADDI, zero_reg, rt, offset);
}


void Assembler::bnezalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(DADDI, zero_reg, rt, offset);
}


void Assembler::beqc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs.code() < rt.code());
  GenInstrImmediate(ADDI, rs, rt, offset);
}


void Assembler::beqzc(Register rs, int32_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  Instr instr = BEQZC | (rs.code() << kRsShift) | offset;
  emit(instr);
}


void Assembler::bnec(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs.code() < rt.code());
  GenInstrImmediate(DADDI, rs, rt, offset);
}


void Assembler::bnezc(Register rs, int32_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  Instr instr = BNEZC | (rs.code() << kRsShift) | offset;
  emit(instr);
}


void Assembler::j(int32_t target) {
#ifdef DEBUG
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  DCHECK(in_range && ((target & 3) == 0));
#endif
  GenInstrJump(J, target >> 2);
}


void Assembler::jr(Register rs) {
  if (!IsMipsArchVariant(kMips32r6)) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    if (rs.is(ra)) {
      positions_recorder()->WriteRecordedPositions();
    }
    GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
    BlockTrampolinePoolFor(1);  // For associated delay slot.
  } else {
    jalr(rs, zero_reg);
  }
}


void Assembler::jal(int32_t target) {
#ifdef DEBUG
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  DCHECK(in_range && ((target & 3) == 0));
#endif
  positions_recorder()->WriteRecordedPositions();
  GenInstrJump(JAL, target >> 2);
}


void Assembler::jalr(Register rs, Register rd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::j_or_jr(int32_t target, Register rs) {
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  if (in_range) {
    j(target);
  } else {
    jr(t9);
  }
}


void Assembler::jal_or_jalr(int32_t target, Register rs) {
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  if (in_range) {
    jal(target);
  } else {
    jalr(t9);
  }
}


// -------Data-processing-instructions---------

// Arithmetic.

void Assembler::addu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
}


void Assembler::addiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(ADDIU, rs, rd, j);
}


void Assembler::subu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
}


void Assembler::mul(Register rd, Register rs, Register rt) {
  if (!IsMipsArchVariant(kMips32r6)) {
    GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
  } else {
    GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
  }
}


void Assembler::mulu(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
}


void Assembler::muh(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
}


void Assembler::muhu(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
}


void Assembler::mod(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
}


void Assembler::modu(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
}


void Assembler::mult(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
}


void Assembler::multu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
}


void Assembler::div(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
}


void Assembler::div(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
}


void Assembler::divu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}


void Assembler::divu(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
}


// Logical.

void Assembler::and_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
}


void Assembler::andi(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(ANDI, rs, rt, j);
}


void Assembler::or_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
}


void Assembler::ori(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(ORI, rs, rt, j);
}


void Assembler::xor_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
}


void Assembler::xori(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(XORI, rs, rt, j);
}


void Assembler::nor(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
}


// Shifts.
void Assembler::sll(Register rd,
                    Register rt,
                    uint16_t sa,
                    bool coming_from_nop) {
  // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
  // generated using the sll instruction. They must be generated using
  // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
  // instructions.
  DCHECK(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
}


void Assembler::sllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
}


void Assembler::srl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
}


void Assembler::srlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
}


void Assembler::sra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
}


void Assembler::srav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
}


void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
  // Should be called via MacroAssembler::Ror.
  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
  emit(instr);
}


void Assembler::rotrv(Register rd, Register rt, Register rs) {
  // Should be called via MacroAssembler::Ror.
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
  emit(instr);
}


// ------------Memory-instructions-------------

// Helper for base-reg + offset, when offset is larger than int16.
void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
  DCHECK(!src.rm().is(at));
  lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
  ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
  addu(at, at, src.rm());  // Add base register.
}
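
// For example (illustrative): lw(t0, MemOperand(s0, 0x12345678)) cannot
// encode the offset in 16 bits, so it expands to
//   lui(at, 0x1234);
//   ori(at, at, 0x5678);
//   addu(at, at, s0);
//   lw(t0, MemOperand(at, 0));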


void Assembler::lb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LB, at, rd, 0);  // Equiv to lb(rd, MemOperand(at, 0));
  }
}


void Assembler::lbu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LBU, at, rd, 0);  // Equiv to lbu(rd, MemOperand(at, 0));
  }
}


void Assembler::lh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LH, at, rd, 0);  // Equiv to lh(rd, MemOperand(at, 0));
  }
}


void Assembler::lhu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LHU, at, rd, 0);  // Equiv to lhu(rd, MemOperand(at, 0));
  }
}


void Assembler::lw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LW, at, rd, 0);  // Equiv to lw(rd, MemOperand(at, 0));
  }
}


void Assembler::lwl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
}


void Assembler::lwr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}


void Assembler::sb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SB, at, rd, 0);  // Equiv to sb(rd, MemOperand(at, 0));
  }
}


void Assembler::sh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SH, at, rd, 0);  // Equiv to sh(rd, MemOperand(at, 0));
  }
}


void Assembler::sw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SW, at, rd, 0);  // Equiv to sw(rd, MemOperand(at, 0));
  }
}


void Assembler::swl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
}


void Assembler::swr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}


void Assembler::lui(Register rd, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(LUI, zero_reg, rd, j);
}


void Assembler::aui(Register rs, Register rt, int32_t j) {
  // This instruction uses the same opcode as 'lui'. The difference in
  // encoding is that 'lui' has zero_reg in the rs field.
  DCHECK(is_uint16(j));
  GenInstrImmediate(LUI, rs, rt, j);
}


// -------------Misc-instructions--------------

// Break / Trap instructions.
void Assembler::break_(uint32_t code, bool break_as_stop) {
  DCHECK((code & ~0xfffff) == 0);
  // We need to invalidate breaks that could be stops as well because the
  // simulator expects a char pointer after the stop instruction.
  // See constants-mips.h for explanation.
  DCHECK((break_as_stop &&
          code <= kMaxStopCode &&
          code > kMaxWatchpointCode) ||
         (!break_as_stop &&
          (code > kMaxStopCode ||
           code <= kMaxWatchpointCode)));
  Instr break_instr = SPECIAL | BREAK | (code << 6);
  emit(break_instr);
}


void Assembler::stop(const char* msg, uint32_t code) {
  DCHECK(code > kMaxWatchpointCode);
  DCHECK(code <= kMaxStopCode);
#if V8_HOST_ARCH_MIPS
  break_(0x54321);
#else  // V8_HOST_ARCH_MIPS
  BlockTrampolinePoolFor(2);
  // The Simulator will handle the stop instruction and get the message
  // address. On MIPS stop() is just a special kind of break_().
  break_(code, true);
  emit(reinterpret_cast<Instr>(msg));
#endif
}


void Assembler::tge(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr = SPECIAL | TGE | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tlt(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tltu(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TLTU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::teq(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tne(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


// Move from HI/LO register.

void Assembler::mfhi(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
}


void Assembler::mflo(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
}


// Set on less than instructions.
void Assembler::slt(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
}


void Assembler::sltu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
}


void Assembler::slti(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTI, rs, rt, j);
}


void Assembler::sltiu(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTIU, rs, rt, j);
}


// Conditional move.
void Assembler::movz(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
}


void Assembler::movn(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
}


void Assembler::movt(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.code_ = (cc & 0x0007) << 2 | 1;
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}


void Assembler::movf(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.code_ = (cc & 0x0007) << 2 | 0;
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}


// Bit twiddling.
void Assembler::clz(Register rd, Register rs) {
  if (!IsMipsArchVariant(kMips32r6)) {
    // Clz instr requires same GPR number in 'rd' and 'rt' fields.
    GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
  } else {
    GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
  }
}


void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ins.
  // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}


void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ext.
  // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}


void Assembler::pref(int32_t hint, const MemOperand& rs) {
  DCHECK(!IsMipsArchVariant(kLoongson));
  DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
  Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
      | (rs.offset_);
  emit(instr);
}
1955 
1956 
1957 // --------Coprocessor-instructions----------------
1958 
1959 // Load, store, move.
1960 void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
1961  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
1962 }
1963 
1964 
1965 void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
1966  // Workaround for the non-8-byte alignment of HeapNumber: convert the
1967  // 64-bit load into two 32-bit loads.
1968  if (IsFp64Mode()) {
1969  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
1970  Register::kMantissaOffset);
1971  GenInstrImmediate(LW, src.rm(), at, src.offset_ +
1972  Register::kExponentOffset);
1973  mthc1(at, fd);
1974  } else {
1975  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
1976  Register::kMantissaOffset);
1977  FPURegister nextfpreg;
1978  nextfpreg.setcode(fd.code() + 1);
1979  GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ +
1980  Register::kExponentOffset);
1981  }
1982 }
1983 
1984 
1985 void Assembler::swc1(FPURegister fd, const MemOperand& src) {
1986  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
1987 }
1988 
1989 
1990 void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
1991  // Workaround for the non-8-byte alignment of HeapNumber: convert the
1992  // 64-bit store into two 32-bit stores.
1993  if (IsFp64Mode()) {
1994  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
1995  Register::kMantissaOffset);
1996  mfhc1(at, fd);
1997  GenInstrImmediate(SW, src.rm(), at, src.offset_ +
1998  Register::kExponentOffset);
1999  } else {
2000  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
2001  Register::kMantissaOffset);
2002  FPURegister nextfpreg;
2003  nextfpreg.setcode(fd.code() + 1);
2004  GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ +
2005  Register::kExponentOffset);
2006  }
2007 }
2008 
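Both ldc1 and sdc1 above decompose one 64-bit FPU access into two 32-bit word accesses at Register::kMantissaOffset and Register::kExponentOffset. A minimal host-side sketch of the same split, assuming the little-endian layout where the mantissa word sits at offset 0 and the sign/exponent word at offset 4 (StoreDoubleAsTwoWords is a hypothetical helper):

#include <cstdint>
#include <cstring>

void StoreDoubleAsTwoWords(double d, uint32_t* mem /* 2 words */) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof(bits));             // reinterpret without UB
  mem[0] = static_cast<uint32_t>(bits);        // mantissa (low) word
  mem[1] = static_cast<uint32_t>(bits >> 32);  // sign/exponent (high) word
}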
2009 
2010 void Assembler::mtc1(Register rt, FPURegister fs) {
2011  GenInstrRegister(COP1, MTC1, rt, fs, f0);
2012 }
2013 
2014 
2015 void Assembler::mthc1(Register rt, FPURegister fs) {
2016  GenInstrRegister(COP1, MTHC1, rt, fs, f0);
2017 }
2018 
2019 
2020 void Assembler::mfc1(Register rt, FPURegister fs) {
2021  GenInstrRegister(COP1, MFC1, rt, fs, f0);
2022 }
2023 
2024 
2025 void Assembler::mfhc1(Register rt, FPURegister fs) {
2026  GenInstrRegister(COP1, MFHC1, rt, fs, f0);
2027 }
2028 
2029 
2030 void Assembler::ctc1(Register rt, FPUControlRegister fs) {
2031  GenInstrRegister(COP1, CTC1, rt, fs);
2032 }
2033 
2034 
2035 void Assembler::cfc1(Register rt, FPUControlRegister fs) {
2036  GenInstrRegister(COP1, CFC1, rt, fs);
2037 }
2038 
2039 
2040 void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
2041  uint64_t i;
2042  memcpy(&i, &d, 8);
2043 
2044  *lo = i & 0xffffffff;
2045  *hi = i >> 32;
2046 }
2047 
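A quick worked example of the helper above: 1.0 has the IEEE-754 bit pattern 0x3FF0000000000000, so splitting it yields a zero low word and 0x3FF00000 in the high word (assembler here stands for any Assembler instance):

uint32_t lo, hi;
assembler->DoubleAsTwoUInt32(1.0, &lo, &hi);
// lo == 0x00000000, hi == 0x3FF00000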
2048 
2049 // Arithmetic.
2050 
2051 void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2052  GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
2053 }
2054 
2055 
2056 void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2057  GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
2058 }
2059 
2060 
2061 void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2062  GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
2063 }
2064 
2065 
2066 void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
2067  FPURegister ft) {
2068  GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
2069 }
2070 
2071 
2072 void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2073  GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
2074 }
2075 
2076 
2077 void Assembler::abs_d(FPURegister fd, FPURegister fs) {
2078  GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
2079 }
2080 
2081 
2082 void Assembler::mov_d(FPURegister fd, FPURegister fs) {
2083  GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
2084 }
2085 
2086 
2087 void Assembler::neg_d(FPURegister fd, FPURegister fs) {
2088  GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
2089 }
2090 
2091 
2092 void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
2093  GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
2094 }
2095 
2096 
2097 // Conversions.
2098 
2099 void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
2100  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
2101 }
2102 
2103 
2104 void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
2105  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
2106 }
2107 
2108 
2109 void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
2110  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
2111 }
2112 
2113 
2114 void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
2115  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
2116 }
2117 
2118 
2119 void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
2120  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
2121 }
2122 
2123 
2124 void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
2125  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
2126 }
2127 
2128 
2129 void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
2130  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
2131 }
2132 
2133 
2134 void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
2135  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
2136 }
2137 
2138 
2139 void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
2140  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
2141 }
2142 
2143 
2144 void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
2145  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
2146 }
2147 
2148 
2149 void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
2150  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2151  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
2152 }
2153 
2154 
2155 void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
2156  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2157  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
2158 }
2159 
2160 
2161 void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
2162  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2163  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
2164 }
2165 
2166 
2167 void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
2168  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2169  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
2170 }
2171 
2172 
2173 void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
2174  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
2175 }
2176 
2177 
2178 void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
2179  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
2180 }
2181 
2182 
2183 void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
2184  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
2185 }
2186 
2187 
2188 void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
2189  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
2190 }
2191 
2192 
2193 void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
2194  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
2195 }
2196 
2197 
2198 void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
2199  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
2200 }
2201 
2202 
2203 void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister ft,
2204  FPURegister fs) {
2205  DCHECK(IsMipsArchVariant(kMips32r6));
2206  DCHECK((fmt == D) || (fmt == S));
2207  GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
2208 }
2209 
2210 
2211 void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister ft,
2212  FPURegister fs) {
2213  DCHECK(IsMipsArchVariant(kMips32r6));
2214  DCHECK((fmt == D) || (fmt == S));
2215  GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
2216 }
2217 
2218 
2219 void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister ft,
2220  FPURegister fs) {
2221  DCHECK(IsMipsArchVariant(kMips32r6));
2222  DCHECK((fmt == D) || (fmt == S));
2223  GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
2224 }
2225 
2226 
2227 void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister ft,
2228  FPURegister fs) {
2229  DCHECK(IsMipsArchVariant(kMips32r6));
2230  DCHECK((fmt == D) || (fmt == S));
2231  GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
2232 }
2233 
2234 
2235 void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
2236  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
2237 }
2238 
2239 
2240 void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
2241  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2242  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
2243 }
2244 
2245 
2246 void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
2247  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
2248 }
2249 
2250 
2251 void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
2252  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
2253 }
2254 
2255 
2256 void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
2257  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2258  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
2259 }
2260 
2261 
2262 void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
2263  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
2264 }
2265 
2266 
2267 // Conditions for >= MIPSr6.
2268 void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
2269  FPURegister fd, FPURegister fs, FPURegister ft) {
2270  DCHECK(IsMipsArchVariant(kMips32r6));
2271  DCHECK((fmt & ~(31 << kRsShift)) == 0);
2272  Instr instr = COP1 | fmt | ft.code() << kFtShift |
2273  fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
2274  emit(instr);
2275 }
2276 
2277 
2278 void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
2279  DCHECK(IsMipsArchVariant(kMips32r6));
2280  Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
2281  emit(instr);
2282 }
2283 
2284 
2285 void Assembler::bc1nez(int16_t offset, FPURegister ft) {
2286  DCHECK(IsMipsArchVariant(kMips32r6));
2287  Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
2288  emit(instr);
2289 }
2290 
2291 
2292 // Conditions for < MIPSr6.
2293 void Assembler::c(FPUCondition cond, SecondaryField fmt,
2294  FPURegister fs, FPURegister ft, uint16_t cc) {
2295  DCHECK(is_uint3(cc));
2296  DCHECK((fmt & ~(31 << kRsShift)) == 0);
2297  Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
2298  | cc << 8 | 3 << 4 | cond;
2299  emit(instr);
2300 }
2301 
2302 
2303 void Assembler::fcmp(FPURegister src1, const double src2,
2304  FPUCondition cond) {
2305  DCHECK(src2 == 0.0);
2306  mtc1(zero_reg, f14);
2307  cvt_d_w(f14, f14);
2308  c(cond, D, src1, f14, 0);
2309 }
2310 
2311 
2312 void Assembler::bc1f(int16_t offset, uint16_t cc) {
2313  DCHECK(is_uint3(cc));
2314  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
2315  emit(instr);
2316 }
2317 
2318 
2319 void Assembler::bc1t(int16_t offset, uint16_t cc) {
2320  DCHECK(is_uint3(cc));
2321  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
2322  emit(instr);
2323 }
2324 
2325 
2326 // Debugging.
2327 void Assembler::RecordJSReturn() {
2328  positions_recorder()->WriteRecordedPositions();
2329  CheckBuffer();
2330  RecordRelocInfo(RelocInfo::JS_RETURN);
2331 }
2332 
2333 
2334 void Assembler::RecordDebugBreakSlot() {
2335  positions_recorder()->WriteRecordedPositions();
2336  CheckBuffer();
2337  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
2338 }
2339 
2340 
2341 void Assembler::RecordComment(const char* msg) {
2342  if (FLAG_code_comments) {
2343  CheckBuffer();
2344  RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
2345  }
2346 }
2347 
2348 
2349 int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
2350  Instr instr = instr_at(pc);
2351  DCHECK(IsJ(instr) || IsLui(instr));
2352  if (IsLui(instr)) {
2353  Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
2354  Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
2355  DCHECK(IsOri(instr_ori));
2356  int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
2357  imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
2358  if (imm == kEndOfJumpChain) {
2359  return 0; // Number of instructions patched.
2360  }
2361  imm += pc_delta;
2362  DCHECK((imm & 3) == 0);
2363 
2364  instr_lui &= ~kImm16Mask;
2365  instr_ori &= ~kImm16Mask;
2366 
2367  instr_at_put(pc + 0 * Assembler::kInstrSize,
2368  instr_lui | ((imm >> kLuiShift) & kImm16Mask));
2369  instr_at_put(pc + 1 * Assembler::kInstrSize,
2370  instr_ori | (imm & kImm16Mask));
2371  return 2; // Number of instructions patched.
2372  } else {
2373  uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
2374  if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
2375  return 0; // Number of instructions patched.
2376  }
2377  imm28 += pc_delta;
2378  imm28 &= kImm28Mask;
2379  DCHECK((imm28 & 3) == 0);
2380 
2381  instr &= ~kImm26Mask;
2382  uint32_t imm26 = imm28 >> 2;
2383  DCHECK(is_uint26(imm26));
2384 
2385  instr_at_put(pc, instr | (imm26 & kImm26Mask));
2386  return 1; // Number of instructions patched.
2387  }
2388 }
2389 
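A worked example of the lui/ori branch above, with illustrative values and the constants from the surrounding code: suppose the pair currently encodes 0x12345678 and the buffer moved by pc_delta = 0x20. The code rebuilds the full 32-bit value, adds the delta, and re-splits it, so a carry out of the low half propagates into the lui immediate automatically:

int32_t imm = (0x1234 << kLuiShift) | 0x5678;   // value held by lui/ori
imm += 0x20;                                    // pc_delta
uint32_t hi = (imm >> kLuiShift) & kImm16Mask;  // 0x1234, back into lui
uint32_t lo = imm & kImm16Mask;                 // 0x5698, back into ori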
2390 
2391 void Assembler::GrowBuffer() {
2392  if (!own_buffer_) FATAL("external code buffer is too small");
2393 
2394  // Compute new buffer size.
2395  CodeDesc desc; // The new buffer.
2396  if (buffer_size_ < 1 * MB) {
2397  desc.buffer_size = 2*buffer_size_;
2398  } else {
2399  desc.buffer_size = buffer_size_ + 1*MB;
2400  }
2401  CHECK_GT(desc.buffer_size, 0); // No overflow.
2402 
2403  // Set up new buffer.
2404  desc.buffer = NewArray<byte>(desc.buffer_size);
2405 
2406  desc.instr_size = pc_offset();
2407  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
2408 
2409  // Copy the data.
2410  int pc_delta = desc.buffer - buffer_;
2411  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
2412  MemMove(desc.buffer, buffer_, desc.instr_size);
2413  MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
2414  desc.reloc_size);
2415 
2416  // Switch buffers.
2417  DeleteArray(buffer_);
2418  buffer_ = desc.buffer;
2419  buffer_size_ = desc.buffer_size;
2420  pc_ += pc_delta;
2421  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
2422  reloc_info_writer.last_pc() + pc_delta);
2423 
2424  // Relocate runtime entries.
2425  for (RelocIterator it(desc); !it.done(); it.next()) {
2426  RelocInfo::Mode rmode = it.rinfo()->rmode();
2427  if (rmode == RelocInfo::INTERNAL_REFERENCE) {
2428  byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
2429  RelocateInternalReference(p, pc_delta);
2430  }
2431  }
2432 
2433  DCHECK(!overflow());
2434 }
2435 
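The growth policy above, restated in isolation: the buffer doubles while it is under 1 MB, then grows linearly by 1 MB per step, bounding both the number of reallocations and the worst-case wasted space (NextBufferSize is a hypothetical helper; MB is the constant used by the code above):

int NextBufferSize(int current_size) {
  return (current_size < 1 * MB) ? 2 * current_size
                                 : current_size + 1 * MB;
}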
2436 
2437 void Assembler::db(uint8_t data) {
2438  CheckBuffer();
2439  *reinterpret_cast<uint8_t*>(pc_) = data;
2440  pc_ += sizeof(uint8_t);
2441 }
2442 
2443 
2444 void Assembler::dd(uint32_t data) {
2445  CheckBuffer();
2446  *reinterpret_cast<uint32_t*>(pc_) = data;
2447  pc_ += sizeof(uint32_t);
2448 }
2449 
2450 
2451 void Assembler::emit_code_stub_address(Code* stub) {
2452  CheckBuffer();
2453  *reinterpret_cast<uint32_t*>(pc_) =
2454  reinterpret_cast<uint32_t>(stub->instruction_start());
2455  pc_ += sizeof(uint32_t);
2456 }
2457 
2458 
2459 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
2460  // We do not try to reuse pool constants.
2461  RelocInfo rinfo(pc_, rmode, data, NULL);
2462  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
2463  // Adjust code for new modes.
2464  DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
2465  || RelocInfo::IsJSReturn(rmode)
2466  || RelocInfo::IsComment(rmode)
2467  || RelocInfo::IsPosition(rmode));
2468  // These modes do not need an entry in the constant pool.
2469  }
2470  if (!RelocInfo::IsNone(rinfo.rmode())) {
2471  // Don't record external references unless the heap will be serialized.
2472  if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
2473  !serializer_enabled() && !emit_debug_code()) {
2474  return;
2475  }
2476  DCHECK(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
2477  if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
2478  RelocInfo reloc_info_with_ast_id(pc_,
2479  rmode,
2480  RecordedAstId().ToInt(),
2481  NULL);
2482  ClearRecordedAstId();
2483  reloc_info_writer.Write(&reloc_info_with_ast_id);
2484  } else {
2485  reloc_info_writer.Write(&rinfo);
2486  }
2487  }
2488 }
2489 
2490 
2491 void Assembler::BlockTrampolinePoolFor(int instructions) {
2492  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
2493 }
2494 
2495 
2496 void Assembler::CheckTrampolinePool() {
2497  // Some small sequences of instructions must not be broken up by the
2498  // insertion of a trampoline pool; such sequences are protected by setting
2499  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
2500  // which are both checked here. Also, recursive calls to CheckTrampolinePool
2501  // are blocked by trampoline_pool_blocked_nesting_.
2502  if ((trampoline_pool_blocked_nesting_ > 0) ||
2503  (pc_offset() < no_trampoline_pool_before_)) {
2504  // Emission is currently blocked; make sure we try again as soon as
2505  // possible.
2506  if (trampoline_pool_blocked_nesting_ > 0) {
2507  next_buffer_check_ = pc_offset() + kInstrSize;
2508  } else {
2509  next_buffer_check_ = no_trampoline_pool_before_;
2510  }
2511  return;
2512  }
2513 
2514  DCHECK(!trampoline_emitted_);
2515  DCHECK(unbound_labels_count_ >= 0);
2516  if (unbound_labels_count_ > 0) {
2517  // First we emit a jump (2 instructions), then we emit the trampoline pool.
2518  { BlockTrampolinePoolScope block_trampoline_pool(this);
2519  Label after_pool;
2520  b(&after_pool);
2521  nop();
2522 
2523  int pool_start = pc_offset();
2524  for (int i = 0; i < unbound_labels_count_; i++) {
2525  uint32_t imm32;
2526  imm32 = jump_address(&after_pool);
2527  { BlockGrowBufferScope block_buf_growth(this);
2528  // Buffer growth (and relocation) must be blocked for internal
2529  // references until associated instructions are emitted and available
2530  // to be patched.
2531  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2532  lui(at, (imm32 & kHiMask) >> kLuiShift);
2533  ori(at, at, (imm32 & kImm16Mask));
2534  }
2535  jr(at);
2536  nop();
2537  }
2538  bind(&after_pool);
2539  trampoline_ = Trampoline(pool_start, unbound_labels_count_);
2540 
2541  trampoline_emitted_ = true;
2542  // As we are only going to emit the trampoline once, we need to prevent any
2543  // further emission.
2544  next_buffer_check_ = kMaxInt;
2545  }
2546  } else {
2547  // The number of branches to unbound labels is zero at this point, so we
2548  // can move the next buffer check to the maximum.
2549  next_buffer_check_ = pc_offset() +
2550  kMaxBranchOffset - kTrampolineSlotsSize * 16;
2551  }
2552  return;
2553 }
2554 
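Each pool slot emitted above is a four-instruction sequence (lui, ori, jr, nop), guarded by the leading b/nop pair, so the pool's footprint is easy to state (TrampolinePoolSize is a hypothetical helper restating the loop, not assembler API):

int TrampolinePoolSize(int unbound_labels) {
  const int kGuardInstrs = 2;  // b after_pool; nop
  const int kSlotInstrs = 4;   // lui at, ...; ori at, at, ...; jr at; nop
  return (kGuardInstrs + kSlotInstrs * unbound_labels) * kInstrSize;
}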
2555 
2556 Address Assembler::target_address_at(Address pc) {
2557  Instr instr1 = instr_at(pc);
2558  Instr instr2 = instr_at(pc + kInstrSize);
2559  // Interpret the 2 instructions generated by li: lui/ori.
2560  if ((GetOpcodeField(instr1) == LUI) && (GetOpcodeField(instr2) == ORI)) {
2561  // Assemble the 32 bit value.
2562  return reinterpret_cast<Address>(
2563  (GetImmediate16(instr1) << 16) | GetImmediate16(instr2));
2564  }
2565 
2566  // We should never get here; force a bad address if we do.
2567  UNREACHABLE();
2568  return (Address)0x0;
2569 }
2570 
2571 
2572 // MIPS and ia32 use opposite encodings for qNaN and sNaN, such that an ia32
2573 // qNaN is a MIPS sNaN, and an ia32 sNaN is a MIPS qNaN. If running from a
2574 // heap snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
2575 // base::OS::nan_value() returns a qNaN.
2576 void Assembler::QuietNaN(HeapObject* object) {
2577  HeapNumber::cast(object)->set_value(base::OS::nan_value());
2578 }
2579 
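Concretely, the default ia32 qNaN is 0x7FF8000000000000: exponent all ones with the top fraction bit set. Under the legacy (pre-r6) MIPS convention a set top fraction bit marks a signaling NaN instead, which is why such values are rewritten via base::OS::nan_value() when a snapshot crosses architectures. A sketch of the detection, assuming that legacy convention (IsIa32StyleQuietNaN is a hypothetical helper):

#include <cstdint>
#include <cstring>

bool IsIa32StyleQuietNaN(double d) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof(bits));
  const uint64_t kExpMask  = 0x7FF0000000000000ULL;  // exponent all ones
  const uint64_t kQuietBit = 0x0008000000000000ULL;  // top fraction bit
  return (bits & kExpMask) == kExpMask && (bits & kQuietBit) != 0;
}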
2580 
2581 // On MIPS, a target address is stored in a lui/ori instruction pair, each
2582 // of which loads 16 bits of the 32-bit address into a register.
2583 // Patching the address must replace both instructions and flush the i-cache.
2584 //
2585 // There is an optimization below which emits a nop when the address
2586 // fits in just 16 bits. This is unlikely to help; it should be benchmarked
2587 // and possibly removed.
2588 void Assembler::set_target_address_at(Address pc,
2589  Address target,
2590  ICacheFlushMode icache_flush_mode) {
2591  Instr instr2 = instr_at(pc + kInstrSize);
2592  uint32_t rt_code = GetRtField(instr2);
2593  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
2594  uint32_t itarget = reinterpret_cast<uint32_t>(target);
2595 
2596 #ifdef DEBUG
2597  // Check we have the result from a li macro-instruction, using instr pair.
2598  Instr instr1 = instr_at(pc);
2599  CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI));
2600 #endif
2601 
2602  // Must use 2 instructions to ensure patchable code => just use lui and ori.
2603  // lui rt, upper-16.
2604  // ori rt, rt, lower-16.
2605  *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
2606  *(p + 1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
2607 
2608  // The following code is an optimization for the common case of Call()
2609  // or Jump(), which loads an address into a register and jumps through it:
2610  // li(t9, address); jalr(t9) (or jr(t9)).
2611  // If the destination address is in the same 256 MB page as the call, it
2612  // is faster to do a direct jal, or j, rather than jump through a register,
2613  // since that lets the CPU pipeline prefetch the target address. However,
2614  // each time the address above is patched, we have to patch the direct jal/j
2615  // instruction, as well as possibly revert to jalr/jr if we now cross a
2616  // 256 MB page. Note that with the jal/j instructions, we do not need to
2617  // load the register, but that code is left in, since it makes it easy to
2618  // revert this process. A further optimization could try replacing the
2619  // li sequence with nops.
2620  // This optimization can only be applied if the rt-code from instr2 is the
2621  // register used for the jalr/jr. Finally, we have to skip 'jr ra', which is
2622  // the MIPS return; occasionally this lands after an li().
2623 
2624  Instr instr3 = instr_at(pc + 2 * kInstrSize);
2625  uint32_t ipc = reinterpret_cast<uint32_t>(pc + 3 * kInstrSize);
2626  bool in_range = ((ipc ^ itarget) >> (kImm26Bits + kImmFieldShift)) == 0;
2627  uint32_t target_field =
2628  static_cast<uint32_t>(itarget & kJumpAddrMask) >> kImmFieldShift;
2629  bool patched_jump = false;
2630 
2631 #ifndef ALLOW_JAL_IN_BOUNDARY_REGION
2632  // This is a workaround for the 24k core E156 bug (it also affects some 34k
2633  // cores). Since the excluded space is only 64 KB out of 256 MB (0.02%), we
2634  // just apply this workaround for all cores so we don't have to identify the core.
2635  if (in_range) {
2636  // The 24k core E156 bug has some very specific requirements, we only check
2637  // the most simple one: if the address of the delay slot instruction is in
2638  // the first or last 32 KB of the 256 MB segment.
2639  uint32_t segment_mask = ((256 * MB) - 1) ^ ((32 * KB) - 1);
2640  uint32_t ipc_segment_addr = ipc & segment_mask;
2641  if (ipc_segment_addr == 0 || ipc_segment_addr == segment_mask)
2642  in_range = false;
2643  }
2644 #endif
2645 
2646  if (IsJalr(instr3)) {
2647  // Try to convert JALR to JAL.
2648  if (in_range && GetRt(instr2) == GetRs(instr3)) {
2649  *(p + 2) = JAL | target_field;
2650  patched_jump = true;
2651  }
2652  } else if (IsJr(instr3)) {
2653  // Try to convert JR to J, skip returns (jr ra).
2654  bool is_ret = static_cast<int>(GetRs(instr3)) == ra.code();
2655  if (in_range && !is_ret && GetRt(instr2) == GetRs(instr3)) {
2656  *(p + 2) = J | target_field;
2657  patched_jump = true;
2658  }
2659  } else if (IsJal(instr3)) {
2660  if (in_range) {
2661  // We are patching an already converted JAL.
2662  *(p + 2) = JAL | target_field;
2663  } else {
2664  // Patch JAL, but out of range, revert to JALR.
2665  // JALR rs reg is the rt reg specified in the ORI instruction.
2666  uint32_t rs_field = GetRt(instr2) << kRsShift;
2667  uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg.
2668  *(p+2) = SPECIAL | rs_field | rd_field | JALR;
2669  }
2670  patched_jump = true;
2671  } else if (IsJ(instr3)) {
2672  if (in_range) {
2673  // We are patching an already converted J (jump).
2674  *(p + 2) = J | target_field;
2675  } else {
2676  // Trying patch J, but out of range, just go back to JR.
2677  // JR 'rs' reg is the 'rt' reg specified in the ORI instruction (instr2).
2678  uint32_t rs_field = GetRt(instr2) << kRsShift;
2679  if (IsMipsArchVariant(kMips32r6)) {
2680  *(p + 2) = SPECIAL | rs_field | (zero_reg.code() << kRdShift) | JALR;
2681  } else {
2682  *(p + 2) = SPECIAL | rs_field | JR;
2683  }
2684  }
2685  patched_jump = true;
2686  }
2687 
2688  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
2689  CpuFeatures::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t));
2690  }
2691 }
2692 
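The in_range test above, restated on plain integers: j/jal take their upper target bits from the delay-slot address, so they can only reach targets in the same 256 MB region, i.e. addresses whose top four bits agree (kImm26Bits + kImmFieldShift == 28). A minimal sketch (SameJumpRegion is a hypothetical helper):

bool SameJumpRegion(uint32_t delay_slot_pc, uint32_t target) {
  return ((delay_slot_pc ^ target) >> 28) == 0;  // top 4 bits equal
}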
2693 
2694 void Assembler::JumpLabelToJumpRegister(Address pc) {
2695  // Address pc points to lui/ori instructions.
2696  // A jump to a label may follow at pc + 2 * kInstrSize.
2697  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
2698 #ifdef DEBUG
2699  Instr instr1 = instr_at(pc);
2700 #endif
2701  Instr instr2 = instr_at(pc + 1 * kInstrSize);
2702  Instr instr3 = instr_at(pc + 2 * kInstrSize);
2703  bool patched = false;
2704 
2705  if (IsJal(instr3)) {
2706  DCHECK(GetOpcodeField(instr1) == LUI);
2707  DCHECK(GetOpcodeField(instr2) == ORI);
2708 
2709  uint32_t rs_field = GetRt(instr2) << kRsShift;
2710  uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg.
2711  *(p + 2) = SPECIAL | rs_field | rd_field | JALR;
2712  patched = true;
2713  } else if (IsJ(instr3)) {
2714  DCHECK(GetOpcodeField(instr1) == LUI);
2715  DCHECK(GetOpcodeField(instr2) == ORI);
2716 
2717  uint32_t rs_field = GetRt(instr2) << kRsShift;
2718  if (IsMipsArchVariant(kMips32r6)) {
2719  *(p + 2) = SPECIAL | rs_field | (zero_reg.code() << kRdShift) | JALR;
2720  } else {
2721  *(p + 2) = SPECIAL | rs_field | JR;
2722  }
2723  patched = true;
2724  }
2725 
2726  if (patched) {
2727  CpuFeatures::FlushICache(pc + 2 * kInstrSize, sizeof(Address));
2728  }
2729 }
2730 
2731 
2732 Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
2733  // No out-of-line constant pool support.
2734  DCHECK(!FLAG_enable_ool_constant_pool);
2735  return isolate->factory()->empty_constant_pool_array();
2736 }
2737 
2738 
2739 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
2740  // No out-of-line constant pool support.
2741  DCHECK(!FLAG_enable_ool_constant_pool);
2742  return;
2743 }
2744 
2745 
2746 } } // namespace v8::internal
2747 
2748 #endif // V8_TARGET_ARCH_MIPS
Definition: version.cc:55