assembler-mips64.cc
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.


#include "src/v8.h"

#if V8_TARGET_ARCH_MIPS64

#include "src/base/cpu.h"
#include "src/mips64/assembler-mips64-inl.h"
#include "src/serialize.h"

namespace v8 {
namespace internal {


// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbol CAN_USE_FPU_INSTRUCTIONS
// can be defined to enable FPU instructions when building the
// snapshot.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
#ifdef CAN_USE_FPU_INSTRUCTIONS
  answer |= 1u << FPU;
#endif  // def CAN_USE_FPU_INSTRUCTIONS

  // If the compiler is allowed to use FPU then we can use FPU too in our code
  // generation even when generating snapshots. This won't work for cross
  // compilation.
#if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0
  answer |= 1u << FPU;
#endif

  return answer;
}
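
// Illustrative note (editorial, not in the original source): callers treat
// the returned value as a bitmask indexed by the CpuFeature enum, e.g.
//
//   unsigned features = CpuFeaturesImpliedByCompiler();
//   bool fpu_ok = (features & (1u << FPU)) != 0;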


const char* DoubleRegister::AllocationIndexToString(int index) {
  DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
  const char* const names[] = {
    "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
    "f16", "f18", "f20", "f22", "f24", "f26"
  };
  return names[index];
}


void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

  // If the compiler is allowed to use fpu then we can use fpu too in our
  // code generation.
#ifndef __mips__
  // For the simulator build, use FPU.
  supported_ |= 1u << FPU;
#else
  // Probe for additional features at runtime.
  base::CPU cpu;
  if (cpu.has_fpu()) supported_ |= 1u << FPU;
#endif
}


void CpuFeatures::PrintTarget() { }
void CpuFeatures::PrintFeatures() { }

int ToNumber(Register reg) {
  DCHECK(reg.is_valid());
  const int kNumbers[] = {
    0,    // zero_reg
    1,    // at
    2,    // v0
    3,    // v1
    4,    // a0
    5,    // a1
    6,    // a2
    7,    // a3
    8,    // a4
    9,    // a5
    10,   // a6
    11,   // a7
    12,   // t0
    13,   // t1
    14,   // t2
    15,   // t3
    16,   // s0
    17,   // s1
    18,   // s2
    19,   // s3
    20,   // s4
    21,   // s5
    22,   // s6
    23,   // s7
    24,   // t8
    25,   // t9
    26,   // k0
    27,   // k1
    28,   // gp
    29,   // sp
    30,   // fp
    31,   // ra
  };
  return kNumbers[reg.code()];
}


Register ToRegister(int num) {
  DCHECK(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {
    zero_reg,
    at,
    v0, v1,
    a0, a1, a2, a3, a4, a5, a6, a7,
    t0, t1, t2, t3,
    s0, s1, s2, s3, s4, s5, s6, s7,
    t8, t9,
    k0, k1,
    gp,
    sp,
    fp,
    ra
  };
  return kRegisters[num];
}


// -----------------------------------------------------------------------------
// Implementation of RelocInfo.

const int RelocInfo::kApplyMask = 0;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded. Being
  // specially coded on MIPS means that it is a lui/ori instruction, and that is
  // always the case inside code objects.
  return true;
}


bool RelocInfo::IsInConstantPool() {
  return false;
}


// Patch the code at the current address with the supplied instructions.
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }

  // Indicate that code has changed.
  CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
}


// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  UNIMPLEMENTED_MIPS();
}


// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand.
// See assembler-mips-inl.h for inlined constructors.

Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    imm64_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // No relocation needed.
    imm64_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE64;
  }
}


MemOperand::MemOperand(Register rm, int64_t offset) : Operand(rm) {
  offset_ = offset;
}


MemOperand::MemOperand(Register rm, int64_t unit, int64_t multiplier,
                       OffsetAddend offset_addend)
    : Operand(rm) {
  offset_ = unit * multiplier + offset_addend;
}
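
// Illustrative usage (editorial sketch with hypothetical values): the
// unit/multiplier constructor is convenient for indexed slot access, e.g.
// the i-th pointer-sized slot relative to a base register, assuming the
// OffsetAddend enumerator offset_zero is available:
//
//   MemOperand slot(s0, kPointerSize, i, offset_zero);  // offset = 8 * i + 0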


// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

static const int kNegOffset = 0x00008000;
// daddiu(sp, sp, 8) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
const Instr kPopInstruction = DADDIU | (kRegister_sp_Code << kRsShift)
      | (kRegister_sp_Code << kRtShift)
      | (kPointerSize & kImm16Mask);  // NOLINT
// daddiu(sp, sp, -8) part of Push(r) operation as pre-decrement of sp.
const Instr kPushInstruction = DADDIU | (kRegister_sp_Code << kRsShift)
      | (kRegister_sp_Code << kRtShift)
      | (-kPointerSize & kImm16Mask);  // NOLINT
// sd(r, MemOperand(sp, 0))
const Instr kPushRegPattern = SD | (kRegister_sp_Code << kRsShift)
      | (0 & kImm16Mask);  // NOLINT
// ld(r, MemOperand(sp, 0))
const Instr kPopRegPattern = LD | (kRegister_sp_Code << kRsShift)
      | (0 & kImm16Mask);  // NOLINT

const Instr kLwRegFpOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
      | (0 & kImm16Mask);  // NOLINT

const Instr kSwRegFpOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
      | (0 & kImm16Mask);  // NOLINT

const Instr kLwRegFpNegOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
      | (kNegOffset & kImm16Mask);  // NOLINT

const Instr kSwRegFpNegOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
      | (kNegOffset & kImm16Mask);  // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xffe00000;
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;


Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      positions_recorder_(this) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

  last_trampoline_pool_end_ = 0;
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  // We leave space (16 * kTrampolineSlotsSize)
  // for BlockTrampolinePoolScope buffer.
  next_buffer_check_ = FLAG_force_long_branches
      ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;

  trampoline_emitted_ = FLAG_force_long_branches;
  unbound_labels_count_ = 0;
  block_buffer_growth_ = false;

  ClearRecordedAstId();
}


void Assembler::GetCode(CodeDesc* desc) {
  DCHECK(pc_ <= reloc_info_writer.pos());  // No overlap.
  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
  desc->origin = this;
}


void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}
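
// For example, Align(8) pads with nop() until pc_offset() is a multiple of
// eight bytes, i.e. an even instruction boundary; Align(4) is effectively a
// no-op on a stream of whole instructions, since every MIPS instruction is
// already 4 bytes wide.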


void Assembler::CodeTargetAlign() {
  // No advantage to aligning branch/call targets to more than a
  // single instruction, that I am aware of.
  Align(4);
}


Register Assembler::GetRtReg(Instr instr) {
  Register rt;
  rt.code_ = (instr & kRtFieldMask) >> kRtShift;
  return rt;
}


Register Assembler::GetRsReg(Instr instr) {
  Register rs;
  rs.code_ = (instr & kRsFieldMask) >> kRsShift;
  return rs;
}


Register Assembler::GetRdReg(Instr instr) {
  Register rd;
  rd.code_ = (instr & kRdFieldMask) >> kRdShift;
  return rd;
}


uint32_t Assembler::GetRt(Instr instr) {
  return (instr & kRtFieldMask) >> kRtShift;
}


uint32_t Assembler::GetRtField(Instr instr) {
  return instr & kRtFieldMask;
}


uint32_t Assembler::GetRs(Instr instr) {
  return (instr & kRsFieldMask) >> kRsShift;
}


uint32_t Assembler::GetRsField(Instr instr) {
  return instr & kRsFieldMask;
}


uint32_t Assembler::GetRd(Instr instr) {
  return (instr & kRdFieldMask) >> kRdShift;
}


uint32_t Assembler::GetRdField(Instr instr) {
  return instr & kRdFieldMask;
}


uint32_t Assembler::GetSa(Instr instr) {
  return (instr & kSaFieldMask) >> kSaShift;
}


uint32_t Assembler::GetSaField(Instr instr) {
  return instr & kSaFieldMask;
}


uint32_t Assembler::GetOpcodeField(Instr instr) {
  return instr & kOpcodeMask;
}


uint32_t Assembler::GetFunction(Instr instr) {
  return (instr & kFunctionFieldMask) >> kFunctionShift;
}


uint32_t Assembler::GetFunctionField(Instr instr) {
  return instr & kFunctionFieldMask;
}


uint32_t Assembler::GetImmediate16(Instr instr) {
  return instr & kImm16Mask;
}


uint32_t Assembler::GetLabelConst(Instr instr) {
  return instr & ~kImm16Mask;
}


bool Assembler::IsPop(Instr instr) {
  return (instr & ~kRtMask) == kPopRegPattern;
}


bool Assembler::IsPush(Instr instr) {
  return (instr & ~kRtMask) == kPushRegPattern;
}


bool Assembler::IsSwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
}


bool Assembler::IsLwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
}


bool Assembler::IsSwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kSwRegFpNegOffsetPattern);
}


bool Assembler::IsLwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kLwRegFpNegOffsetPattern);
}


// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.

// The link chain is terminated by a value in the instruction of -1,
// which is an otherwise illegal value (branch -1 is an infinite loop).
// The instruction's 16-bit offset field addresses 32-bit words, but in
// code it is converted to an 18-bit value addressing bytes, hence the -4
// value.

const int kEndOfChain = -4;
// Determines the end of the Jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;
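
// Worked example (editorial, illustrative): two forward branches at pc
// offsets 0 and 8 referring to the same unbound label form a chain. The
// branch at 8 stores a back-link to position 0 in its offset field, while
// the branch at 0 stores -1 in its 16-bit word-offset field, which target_at()
// reads back as -4 bytes == kEndOfChain, terminating the chain. bind() later
// walks the chain and patches each branch to the real target.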


bool Assembler::IsBranch(Instr instr) {
  uint32_t opcode   = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  // Checks if the instruction is a branch.
  return opcode == BEQ ||
      opcode == BNE ||
      opcode == BLEZ ||
      opcode == BGTZ ||
      opcode == BEQL ||
      opcode == BNEL ||
      opcode == BLEZL ||
      opcode == BGTZL ||
      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
      (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
      (opcode == COP1 && rs_field == BC1EQZ) ||
      (opcode == COP1 && rs_field == BC1NEZ);
}


bool Assembler::IsEmittedConstant(Instr instr) {
  uint32_t label_constant = GetLabelConst(instr);
  return label_constant == 0;  // Emitted label const in reg-exp engine.
}


bool Assembler::IsBeq(Instr instr) {
  return GetOpcodeField(instr) == BEQ;
}


bool Assembler::IsBne(Instr instr) {
  return GetOpcodeField(instr) == BNE;
}


bool Assembler::IsJump(Instr instr) {
  uint32_t opcode   = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rd_field = GetRdField(instr);
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is a jump.
  return opcode == J || opcode == JAL ||
      (opcode == SPECIAL && rt_field == 0 &&
      ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
}


bool Assembler::IsJ(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a jump.
  return opcode == J;
}


bool Assembler::IsJal(Instr instr) {
  return GetOpcodeField(instr) == JAL;
}


bool Assembler::IsJr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
}


bool Assembler::IsJalr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
}


bool Assembler::IsLui(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a load upper immediate.
  return opcode == LUI;
}


bool Assembler::IsOri(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is an or-immediate.
  return opcode == ORI;
}


bool Assembler::IsNop(Instr instr, unsigned int type) {
  // See Assembler::nop(type).
  DCHECK(type < 32);
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t function = GetFunctionField(instr);
  uint32_t rt = GetRt(instr);
  uint32_t rd = GetRd(instr);
  uint32_t sa = GetSa(instr);

  // Traditional mips nop == sll(zero_reg, zero_reg, 0)
  // When marking non-zero type, use sll(zero_reg, at, type)
  // to avoid use of mips ssnop and ehb special encodings
  // of the sll instruction.

  Register nop_rt_reg = (type == 0) ? zero_reg : at;
  bool ret = (opcode == SPECIAL && function == SLL &&
              rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
              rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
              sa == type);

  return ret;
}
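
// For example, the plain nop() is sll(zero_reg, zero_reg, 0), which encodes
// as the all-zero instruction word 0x00000000, while a marker nop of type t
// is sll(zero_reg, at, t) and is only recognized by IsNop(instr, t).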


int32_t Assembler::GetBranchOffset(Instr instr) {
  DCHECK(IsBranch(instr));
  return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
}


bool Assembler::IsLw(Instr instr) {
  return ((instr & kOpcodeMask) == LW);
}


int16_t Assembler::GetLwOffset(Instr instr) {
  DCHECK(IsLw(instr));
  return ((instr & kImm16Mask));
}


Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
  DCHECK(IsLw(instr));

  // We actually create a new lw instruction based on the original one.
  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
      | (offset & kImm16Mask);

  return temp_instr;
}


bool Assembler::IsSw(Instr instr) {
  return ((instr & kOpcodeMask) == SW);
}


Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
  DCHECK(IsSw(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAddImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIU || (instr & kOpcodeMask) == DADDIU);
}


Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
  DCHECK(IsAddImmediate(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAndImmediate(Instr instr) {
  return GetOpcodeField(instr) == ANDI;
}


int64_t Assembler::target_at(int64_t pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    // Emitted label constant, not part of a branch.
    if (instr == 0) {
      return kEndOfChain;
    } else {
      int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
      return (imm18 + pos);
    }
  }
  // Check we have a branch or jump instruction.
  DCHECK(IsBranch(instr) || IsJ(instr) || IsLui(instr));
  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  if (IsBranch(instr)) {
    int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
    if (imm18 == kEndOfChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      return pos + kBranchPCOffset + imm18;
    }
  } else if (IsLui(instr)) {
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
    DCHECK(IsOri(instr_ori));
    DCHECK(IsOri(instr_ori2));

    // TODO(plind) create named constants for shift values.
    int64_t imm = static_cast<int64_t>(instr_lui & kImm16Mask) << 48;
    imm |= static_cast<int64_t>(instr_ori & kImm16Mask) << 32;
    imm |= static_cast<int64_t>(instr_ori2 & kImm16Mask) << 16;
    // Sign extend address.
    imm >>= 16;

    if (imm == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      uint64_t instr_address = reinterpret_cast<uint64_t>(buffer_ + pos);
      int64_t delta = instr_address - imm;
      DCHECK(pos > delta);
      return pos - delta;
    }
  } else {
    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    if (imm28 == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      uint64_t instr_address = reinterpret_cast<uint64_t>(buffer_ + pos);
      instr_address &= kImm28Mask;
      int64_t delta = instr_address - imm28;
      DCHECK(pos > delta);
      return pos - delta;
    }
  }
}


void Assembler::target_at_put(int64_t pos, int64_t target_pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }

  DCHECK(IsBranch(instr) || IsJ(instr) || IsLui(instr));
  if (IsBranch(instr)) {
    int32_t imm18 = target_pos - (pos + kBranchPCOffset);
    DCHECK((imm18 & 3) == 0);

    instr &= ~kImm16Mask;
    int32_t imm16 = imm18 >> 2;
    DCHECK(is_int16(imm16));

    instr_at_put(pos, instr | (imm16 & kImm16Mask));
  } else if (IsLui(instr)) {
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
    DCHECK(IsOri(instr_ori));
    DCHECK(IsOri(instr_ori2));

    uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
    DCHECK((imm & 3) == 0);

    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;
    instr_ori2 &= ~kImm16Mask;

    instr_at_put(pos + 0 * Assembler::kInstrSize,
                 instr_lui | ((imm >> 32) & kImm16Mask));
    instr_at_put(pos + 1 * Assembler::kInstrSize,
                 instr_ori | ((imm >> 16) & kImm16Mask));
    instr_at_put(pos + 3 * Assembler::kInstrSize,
                 instr_ori2 | (imm & kImm16Mask));
  } else {
    uint64_t imm28 = reinterpret_cast<uint64_t>(buffer_) + target_pos;
    imm28 &= kImm28Mask;
    DCHECK((imm28 & 3) == 0);

    instr &= ~kImm26Mask;
    uint32_t imm26 = imm28 >> 2;
    DCHECK(is_uint26(imm26));

    instr_at_put(pos, instr | (imm26 & kImm26Mask));
  }
}


void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm16Mask) == 0) {
        PrintF("value\n");
      } else {
        PrintF("%d\n", instr);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}


void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
  int32_t trampoline_pos = kInvalidSlotPos;
  if (L->is_linked() && !trampoline_emitted_) {
    unbound_labels_count_--;
    next_buffer_check_ += kTrampolineSlotsSize;
  }

  while (L->is_linked()) {
    int32_t fixup_pos = L->pos();
    int32_t dist = pos - fixup_pos;
    next(L);  // Call next before overwriting link with target at fixup_pos.
    Instr instr = instr_at(fixup_pos);
    if (IsBranch(instr)) {
      if (dist > kMaxBranchOffset) {
        if (trampoline_pos == kInvalidSlotPos) {
          trampoline_pos = get_trampoline_entry(fixup_pos);
          CHECK(trampoline_pos != kInvalidSlotPos);
        }
        DCHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
        target_at_put(fixup_pos, trampoline_pos);
        fixup_pos = trampoline_pos;
        dist = pos - fixup_pos;
      }
      target_at_put(fixup_pos, pos);
    } else {
      DCHECK(IsJ(instr) || IsLui(instr) || IsEmittedConstant(instr));
      target_at_put(fixup_pos, pos);
    }
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}
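
// Typical label lifecycle (editorial sketch; register choices and the exact
// raw-assembler call pattern are illustrative assumptions):
//
//   Label done;
//   assm.beq(a0, a1, assm.branch_offset(&done, false) >> 2);  // links label
//   assm.nop();                                               // delay slot
//   ...
//   assm.bind(&done);  // bind_to() patches every linked branch to here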


void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // Label can only be bound once.
  bind_to(L, pc_offset());
}


void Assembler::next(Label* L) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos());
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    DCHECK(link >= 0);
    L->link_to(link);
  }
}


bool Assembler::is_near(Label* L) {
  if (L->is_bound()) {
    return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize);
  }
  return false;
}


// We have to use a temporary register for things that can be relocated even
// if they can be encoded in MIPS's 16-bit immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
  return !RelocInfo::IsNone(rmode);
}

void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 Register rd,
                                 uint16_t sa,
                                 SecondaryField func) {
  DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 uint16_t msb,
                                 uint16_t lsb,
                                 SecondaryField func) {
  DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (msb << kRdShift) | (lsb << kSaShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
      | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 FPURegister fr,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid());
  Instr instr = opcode | fmt | (rt.code() << kRtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPUControlRegister fs,
                                 SecondaryField func) {
  DCHECK(fs.is_valid() && rt.is_valid());
  Instr instr =
      opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
  emit(instr);
}


// Instructions with immediate value.
// Registers are in the order of the instruction encoding, from left to right.
void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  Register rt,
                                  int32_t j) {
  DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (j & kImm16Mask);
  emit(instr);
}


void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  SecondaryField SF,
                                  int32_t j) {
  DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
  emit(instr);
}


void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  FPURegister ft,
                                  int32_t j) {
  DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
      | (j & kImm16Mask);
  emit(instr);
}


void Assembler::GenInstrJump(Opcode opcode,
                             uint32_t address) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(is_uint26(address));
  Instr instr = opcode | address;
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// Returns the next free trampoline entry.
int32_t Assembler::get_trampoline_entry(int32_t pos) {
  int32_t trampoline_entry = kInvalidSlotPos;
  if (!internal_trampoline_exception_) {
    if (trampoline_.start() > pos) {
      trampoline_entry = trampoline_.take_slot();
    }

    if (kInvalidSlotPos == trampoline_entry) {
      internal_trampoline_exception_ = true;
    }
  }
  return trampoline_entry;
}


uint64_t Assembler::jump_address(Label* L) {
  int64_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      return kEndOfJumpChain;
    }
  }

  uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
  DCHECK((imm & 3) == 0);

  return imm;
}


int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int32_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
  DCHECK((offset & 3) == 0);
  DCHECK(is_int16(offset >> 2));

  return offset;
}
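
// Offset arithmetic (editorial, illustrative): MIPS branch offsets are
// relative to the delay-slot pc. For a branch emitted at pc_offset() 100
// whose label is bound at 120, the byte offset is
// 120 - (100 + kBranchPCOffset) = 16 (with kBranchPCOffset == 4), and the
// instruction's 16-bit field encodes 16 >> 2 == 4 words.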


int32_t Assembler::branch_offset_compact(Label* L,
                                         bool jump_elimination_allowed) {
  int32_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - pc_offset();
  DCHECK((offset & 3) == 0);
  DCHECK(is_int16(offset >> 2));

  return offset;
}


int32_t Assembler::branch_offset21(Label* L, bool jump_elimination_allowed) {
  int32_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
  DCHECK((offset & 3) == 0);
  DCHECK(((offset >> 2) & 0xFFE00000) == 0);  // Offset is 21-bit wide.

  return offset;
}


int32_t Assembler::branch_offset21_compact(Label* L,
                                           bool jump_elimination_allowed) {
  int32_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - pc_offset();
  DCHECK((offset & 3) == 0);
  DCHECK(((offset >> 2) & 0xFFE00000) == 0);  // Offset is 21-bit wide.

  return offset;
}


void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      int32_t imm18 = target_pos - at_offset;
      DCHECK((imm18 & 3) == 0);
      int32_t imm16 = imm18 >> 2;
      DCHECK(is_int16(imm16));
      instr_at_put(at_offset, (imm16 & kImm16Mask));
    } else {
      target_pos = kEndOfChain;
      instr_at_put(at_offset, 0);
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
    }
    L->link_to(at_offset);
  }
}


//------- Branch and jump instructions --------

void Assembler::b(int16_t offset) {
  beq(zero_reg, zero_reg, offset);
}


void Assembler::bal(int16_t offset) {
  positions_recorder()->WriteRecordedPositions();
  bgezal(zero_reg, offset);
}


void Assembler::beq(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BEQ, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgezc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZL, rt, rt, offset);
}


void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZ, rs, rt, offset);
}


void Assembler::bgec(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZL, rs, rt, offset);
}


void Assembler::bgezal(Register rs, int16_t offset) {
  DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgtz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgtzc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZL, zero_reg, rt, offset);
}


void Assembler::blez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::blezc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZL, zero_reg, rt, offset);
}


void Assembler::bltzc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZL, rt, rt, offset);
}


void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZ, rs, rt, offset);
}


void Assembler::bltc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZL, rs, rt, offset);
}


void Assembler::bltz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bltzal(Register rs, int16_t offset) {
  DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bne(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BNE, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bovc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(rs.code() >= rt.code());
  GenInstrImmediate(ADDI, rs, rt, offset);
}


void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(rs.code() >= rt.code());
  GenInstrImmediate(DADDI, rs, rt, offset);
}


void Assembler::blezalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZ, zero_reg, rt, offset);
}


void Assembler::bgezalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZ, rt, rt, offset);
}


void Assembler::bgezall(Register rs, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
}


void Assembler::bltzalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZ, rt, rt, offset);
}


void Assembler::bgtzalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZ, zero_reg, rt, offset);
}


void Assembler::beqzalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(ADDI, zero_reg, rt, offset);
}


void Assembler::bnezalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(DADDI, zero_reg, rt, offset);
}


void Assembler::beqc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.code() < rt.code());
  GenInstrImmediate(ADDI, rs, rt, offset);
}


void Assembler::beqzc(Register rs, int32_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  Instr instr = BEQZC | (rs.code() << kRsShift) | offset;
  emit(instr);
}


void Assembler::bnec(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.code() < rt.code());
  GenInstrImmediate(DADDI, rs, rt, offset);
}


void Assembler::bnezc(Register rs, int32_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  Instr instr = BNEZC | (rs.code() << kRsShift) | offset;
  emit(instr);
}


void Assembler::j(int64_t target) {
#ifdef DEBUG
  // Get pc of delay slot.
  uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
  bool in_range = ((ipc ^ static_cast<uint64_t>(target)) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  DCHECK(in_range && ((target & 3) == 0));
#endif
  GenInstrJump(J, target >> 2);
}
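
// Note (editorial): a J instruction can only reach targets inside the same
// 2^28-byte (256 MB) region, because the upper bits of the delay-slot pc are
// preserved; the DEBUG check above compares all bits from position
// kImm26Bits + kImmFieldShift (i.e. bit 28) upward between ipc and target.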


void Assembler::jr(Register rs) {
  if (kArchVariant != kMips64r6) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    if (rs.is(ra)) {
      positions_recorder()->WriteRecordedPositions();
    }
    GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
    BlockTrampolinePoolFor(1);  // For associated delay slot.
  } else {
    jalr(rs, zero_reg);
  }
}


void Assembler::jal(int64_t target) {
#ifdef DEBUG
  // Get pc of delay slot.
  uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
  bool in_range = ((ipc ^ static_cast<uint64_t>(target)) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  DCHECK(in_range && ((target & 3) == 0));
#endif
  positions_recorder()->WriteRecordedPositions();
  GenInstrJump(JAL, target >> 2);
}


void Assembler::jalr(Register rs, Register rd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::j_or_jr(int64_t target, Register rs) {
  // Get pc of delay slot.
  uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
  bool in_range = ((ipc ^ static_cast<uint64_t>(target)) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  if (in_range) {
    j(target);
  } else {
    jr(t9);
  }
}


void Assembler::jal_or_jalr(int64_t target, Register rs) {
  // Get pc of delay slot.
  uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
  bool in_range = ((ipc ^ static_cast<uint64_t>(target)) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  if (in_range) {
    jal(target);
  } else {
    jalr(t9);
  }
}


// -------Data-processing-instructions---------

// Arithmetic.

void Assembler::addu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
}


void Assembler::addiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(ADDIU, rs, rd, j);
}


void Assembler::subu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
}


void Assembler::mul(Register rd, Register rs, Register rt) {
  if (kArchVariant == kMips64r6) {
    GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
  } else {
    GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
  }
}


void Assembler::muh(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
}


void Assembler::mulu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
}


void Assembler::muhu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
}


void Assembler::dmul(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH);
}


void Assembler::dmuh(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH);
}


void Assembler::dmulu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH_U);
}


void Assembler::dmuhu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH_U);
}


void Assembler::mult(Register rs, Register rt) {
  DCHECK(kArchVariant != kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
}


void Assembler::multu(Register rs, Register rt) {
  DCHECK(kArchVariant != kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
}


void Assembler::daddiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(DADDIU, rs, rd, j);
}


void Assembler::div(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
}


void Assembler::div(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
}


void Assembler::mod(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
}


void Assembler::divu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}


void Assembler::divu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
}


void Assembler::modu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
}


void Assembler::daddu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DADDU);
}


void Assembler::dsubu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSUBU);
}


void Assembler::dmult(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULT);
}


void Assembler::dmultu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULTU);
}


void Assembler::ddiv(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIV);
}


void Assembler::ddiv(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD);
}


void Assembler::dmod(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD);
}


void Assembler::ddivu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIVU);
}


void Assembler::ddivu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD_U);
}


void Assembler::dmodu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD_U);
}


// Logical.

void Assembler::and_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
}


void Assembler::andi(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(ANDI, rs, rt, j);
}


void Assembler::or_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
}


void Assembler::ori(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(ORI, rs, rt, j);
}


void Assembler::xor_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
}


void Assembler::xori(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(XORI, rs, rt, j);
}


void Assembler::nor(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
}


// Shifts.
void Assembler::sll(Register rd,
                    Register rt,
                    uint16_t sa,
                    bool coming_from_nop) {
  // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
  // generated using the sll instruction. They must be generated using
  // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
  // instructions.
  DCHECK(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
}


void Assembler::sllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
}


void Assembler::srl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
}


void Assembler::srlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
}


void Assembler::sra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
}


void Assembler::srav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
}


void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
  // Should be called via MacroAssembler::Ror.
  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
  emit(instr);
}


void Assembler::rotrv(Register rd, Register rt, Register rs) {
  // Should be called via MacroAssembler::Ror.
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
  emit(instr);
}


void Assembler::dsll(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSLL);
}


void Assembler::dsllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSLLV);
}


void Assembler::dsrl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRL);
}


void Assembler::dsrlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRLV);
}


void Assembler::drotr(Register rd, Register rt, uint16_t sa) {
  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | DSRL;
  emit(instr);
}


void Assembler::drotrv(Register rd, Register rt, Register rs) {
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (1 << kSaShift) | DSRLV;
  emit(instr);
}


void Assembler::dsra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRA);
}


void Assembler::dsrav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRAV);
}


void Assembler::dsll32(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSLL32);
}


void Assembler::dsrl32(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRL32);
}


void Assembler::dsra32(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRA32);
}


// ------------Memory-instructions-------------

// Helper for base-reg + offset, when offset is larger than int16.
void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
  DCHECK(!src.rm().is(at));
  DCHECK(is_int32(src.offset_));
  daddiu(at, zero_reg, (src.offset_ >> kLuiShift) & kImm16Mask);
  dsll(at, at, kLuiShift);
  ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
  daddu(at, at, src.rm());  // Add base register.
}
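
// Expansion example (editorial, hypothetical offset): for
// MemOperand(s0, 0x12345678) this helper emits roughly
//
//   daddiu(at, zero_reg, 0x1234);  // upper 16 bits of the 32-bit offset
//   dsll(at, at, 16);
//   ori(at, at, 0x5678);           // lower 16 bits
//   daddu(at, at, s0);             // at = base + offset
//
// after which the caller issues the memory access with a zero offset off at.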


void Assembler::lb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LB, at, rd, 0);  // Equiv to lb(rd, MemOperand(at, 0));
  }
}


void Assembler::lbu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LBU, at, rd, 0);  // Equiv to lbu(rd, MemOperand(at, 0));
  }
}


void Assembler::lh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LH, at, rd, 0);  // Equiv to lh(rd, MemOperand(at, 0));
  }
}


void Assembler::lhu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LHU, at, rd, 0);  // Equiv to lhu(rd, MemOperand(at, 0));
  }
}


void Assembler::lw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LW, at, rd, 0);  // Equiv to lw(rd, MemOperand(at, 0));
  }
}


void Assembler::lwu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LWU, at, rd, 0);  // Equiv to lwu(rd, MemOperand(at, 0));
  }
}


void Assembler::lwl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
}


void Assembler::lwr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}


void Assembler::sb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SB, at, rd, 0);  // Equiv to sb(rd, MemOperand(at, 0));
  }
}


void Assembler::sh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SH, at, rd, 0);  // Equiv to sh(rd, MemOperand(at, 0));
  }
}


void Assembler::sw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SW, at, rd, 0);  // Equiv to sw(rd, MemOperand(at, 0));
  }
}


void Assembler::swl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
}


void Assembler::swr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}


void Assembler::lui(Register rd, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(LUI, zero_reg, rd, j);
}


void Assembler::aui(Register rs, Register rt, int32_t j) {
  // This instruction uses the same opcode as 'lui'. The difference in
  // encoding is that 'lui' has the zero register in the rs field.
  DCHECK(is_uint16(j));
  GenInstrImmediate(LUI, rs, rt, j);
}


void Assembler::daui(Register rs, Register rt, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(DAUI, rs, rt, j);
}


void Assembler::dahi(Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(REGIMM, rs, DAHI, j);
}


void Assembler::dati(Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(REGIMM, rs, DATI, j);
}


void Assembler::ldl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LDL, rs.rm(), rd, rs.offset_);
}


void Assembler::ldr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LDR, rs.rm(), rd, rs.offset_);
}


void Assembler::sdl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SDL, rs.rm(), rd, rs.offset_);
}


void Assembler::sdr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SDR, rs.rm(), rd, rs.offset_);
}
1953 
1954 
1955 void Assembler::ld(Register rd, const MemOperand& rs) {
1956  if (is_int16(rs.offset_)) {
1957  GenInstrImmediate(LD, rs.rm(), rd, rs.offset_);
1958  } else { // Offset > 16 bits, use multiple instructions to load.
1959  LoadRegPlusOffsetToAt(rs);
1960  GenInstrImmediate(LD, at, rd, 0); // Equiv to ld(rd, MemOperand(at, 0));
1961  }
1962 }
1963 
1964 
1965 void Assembler::sd(Register rd, const MemOperand& rs) {
1966  if (is_int16(rs.offset_)) {
1967  GenInstrImmediate(SD, rs.rm(), rd, rs.offset_);
1968  } else { // Offset > 16 bits, use multiple instructions to store.
1969  LoadRegPlusOffsetToAt(rs);
1970  GenInstrImmediate(SD, at, rd, 0); // Equiv to sd(rd, MemOperand(at, 0));
1971  }
1972 }
1973 
1974 
1975 // -------------Misc-instructions--------------
1976 
1977 // Break / Trap instructions.
1978 void Assembler::break_(uint32_t code, bool break_as_stop) {
1979  DCHECK((code & ~0xfffff) == 0);
1980  // We need to invalidate breaks that could be stops as well because the
1981  // simulator expects a char pointer after the stop instruction.
1982  // See constants-mips.h for explanation.
1983  DCHECK((break_as_stop &&
1984  code <= kMaxStopCode &&
1985  code > kMaxWatchpointCode) ||
1986  (!break_as_stop &&
1987  (code > kMaxStopCode ||
1988  code <= kMaxWatchpointCode)));
1989  Instr break_instr = SPECIAL | BREAK | (code << 6);
1990  emit(break_instr);
1991 }
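// For illustration (not in the original source): the 20-bit code occupies
// bits [25:6] of the instruction word, between the SPECIAL opcode and the
// BREAK function field, so break_(0x54321) emits the word
//
//   SPECIAL | BREAK | (0x54321 << 6)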
1992 
1993 
1994 void Assembler::stop(const char* msg, uint32_t code) {
1995  DCHECK(code > kMaxWatchpointCode);
1996  DCHECK(code <= kMaxStopCode);
1997 #if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
1998  break_(0x54321);
1999 #else // V8_HOST_ARCH_MIPS
2000  BlockTrampolinePoolFor(3);
2001  // The Simulator will handle the stop instruction and get the message address.
2002  // On MIPS stop() is just a special kind of break_().
2003  break_(code, true);
2004  emit(reinterpret_cast<uint64_t>(msg));
2005 #endif
2006 }
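// Sketch of the simulator path above (illustrative only): blocking the
// trampoline pool for 3 instruction slots keeps the 64-bit message pointer
// glued to the break instruction that the simulator decodes:
//
//   break_(code, true);                     // 1 slot.
//   emit(reinterpret_cast<uint64_t>(msg));  // 2 slots (64-bit pointer).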
2007 
2008 
2009 void Assembler::tge(Register rs, Register rt, uint16_t code) {
2010  DCHECK(is_uint10(code));
2011  Instr instr = SPECIAL | TGE | rs.code() << kRsShift
2012  | rt.code() << kRtShift | code << 6;
2013  emit(instr);
2014 }
2015 
2016 
2017 void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
2018  DCHECK(is_uint10(code));
2019  Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
2020  | rt.code() << kRtShift | code << 6;
2021  emit(instr);
2022 }
2023 
2024 
2025 void Assembler::tlt(Register rs, Register rt, uint16_t code) {
2026  DCHECK(is_uint10(code));
2027  Instr instr =
2028  SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2029  emit(instr);
2030 }
2031 
2032 
2033 void Assembler::tltu(Register rs, Register rt, uint16_t code) {
2034  DCHECK(is_uint10(code));
2035  Instr instr =
2036  SPECIAL | TLTU | rs.code() << kRsShift
2037  | rt.code() << kRtShift | code << 6;
2038  emit(instr);
2039 }
2040 
2041 
2042 void Assembler::teq(Register rs, Register rt, uint16_t code) {
2043  DCHECK(is_uint10(code));
2044  Instr instr =
2045  SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2046  emit(instr);
2047 }
2048 
2049 
2050 void Assembler::tne(Register rs, Register rt, uint16_t code) {
2051  DCHECK(is_uint10(code));
2052  Instr instr =
2053  SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2054  emit(instr);
2055 }
2056 
2057 
2058 // Move from HI/LO register.
2059 
2060 void Assembler::mfhi(Register rd) {
2061  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
2062 }
2063 
2064 
2065 void Assembler::mflo(Register rd) {
2066  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
2067 }
2068 
2069 
2070 // Set on less than instructions.
2071 void Assembler::slt(Register rd, Register rs, Register rt) {
2072  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
2073 }
2074 
2075 
2076 void Assembler::sltu(Register rd, Register rs, Register rt) {
2077  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
2078 }
2079 
2080 
2081 void Assembler::slti(Register rt, Register rs, int32_t j) {
2082  GenInstrImmediate(SLTI, rs, rt, j);
2083 }
2084 
2085 
2086 void Assembler::sltiu(Register rt, Register rs, int32_t j) {
2087  GenInstrImmediate(SLTIU, rs, rt, j);
2088 }
2089 
2090 
2091 // Conditional move.
2092 void Assembler::movz(Register rd, Register rs, Register rt) {
2093  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
2094 }
2095 
2096 
2097 void Assembler::movn(Register rd, Register rs, Register rt) {
2098  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
2099 }
2100 
2101 
2102 void Assembler::movt(Register rd, Register rs, uint16_t cc) {
2103  Register rt;
2104  rt.code_ = (cc & 0x0007) << 2 | 1;
2105  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
2106 }
2107 
2108 
2109 void Assembler::movf(Register rd, Register rs, uint16_t cc) {
2110  Register rt;
2111  rt.code_ = (cc & 0x0007) << 2 | 0;
2112  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
2113 }
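// For illustration (not in the original source): movt/movf reuse the rt field
// to carry the FPU condition code and the true/false sense,
//
//   rt.code_ = cc << 2 | tf;  // tf = 1 for movt, 0 for movf.
//
// so movt(v0, v1, 2) encodes rt as 0b01001 (cc = 2, tf = 1).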
2114 
2115 
2116 void Assembler::sel(SecondaryField fmt, FPURegister fd,
2117  FPURegister ft, FPURegister fs, uint8_t sel) {
2118  DCHECK(kArchVariant == kMips64r6);
2119  DCHECK((fmt == D) || (fmt == S));
2121 
2122  Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift |
2123  fs.code() << kFsShift | fd.code() << kFdShift | SEL;
2124  emit(instr);
2125 }
2126 
2127 
2128 // GPR.
2129 void Assembler::seleqz(Register rs, Register rt, Register rd) {
2130  DCHECK(kArchVariant == kMips64r6);
2131  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S);
2132 }
2133 
2134 
2135 // FPR.
2136 void Assembler::seleqz(SecondaryField fmt, FPURegister fd,
2137  FPURegister ft, FPURegister fs) {
2138  DCHECK(kArchVariant == kMips64r6);
2139  DCHECK((fmt == D) || (fmt == S));
2141 
2142  Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift |
2143  fs.code() << kFsShift | fd.code() << kFdShift | SELEQZ_C;
2144  emit(instr);
2145 }
2146 
2147 
2148 // GPR.
2149 void Assembler::selnez(Register rs, Register rt, Register rd) {
2150  DCHECK(kArchVariant == kMips64r6);
2151  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
2152 }
2153 
2154 
2155 // FPR.
2156 void Assembler::selnez(SecondaryField fmt, FPURegister fd,
2157  FPURegister ft, FPURegister fs) {
2158  DCHECK(kArchVariant == kMips64r6);
2159  DCHECK((fmt == D) || (fmt == S));
2161 
2162  Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift |
2163  fs.code() << kFsShift | fd.code() << kFdShift | SELNEZ_C;
2164  emit(instr);
2165 }
2166 
2167 
2168 // Bit twiddling.
2169 void Assembler::clz(Register rd, Register rs) {
2170  if (kArchVariant != kMips64r6) {
2171  // Clz instr requires same GPR number in 'rd' and 'rt' fields.
2172  GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
2173  } else {
2174  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
2175  }
2176 }
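// Illustrative encodings (not part of the original file): pre-r6 CLZ lives
// under SPECIAL2 and requires the destination in both the rt and rd slots,
// while r6 re-encodes it under SPECIAL with sa = 1:
//
//   pre-r6: SPECIAL2 | rs | rd (in rt slot) | rd | sa = 0 | CLZ
//   r6:     SPECIAL  | rs | zero_reg        | rd | sa = 1 | CLZ_R6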
2177 
2178 
2179 void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2180  // Should be called via MacroAssembler::Ins.
2181  // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
2182  DCHECK(kArchVariant == kMips64r2);
2183  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
2184 }
2185 
2186 
2187 void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2188  // Should be called via MacroAssembler::Ext.
2189  // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
2190  DCHECK(kArchVariant == kMips64r2);
2191  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
2192 }
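// For illustration (not in the original source): ins/ext encode the bit range
// as msb/lsb in the rd and sa slots. Extracting or inserting an 8-bit field
// starting at bit 4:
//
//   ext_(v0, v1, 4, 8);  // msb slot = size - 1 = 7, lsb slot = pos = 4.
//   ins_(v1, v0, 4, 8);  // msb slot = pos + size - 1 = 11, lsb slot = 4.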
2193 
2194 
2195 void Assembler::pref(int32_t hint, const MemOperand& rs) {
2196  DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
2197  Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
2198  | (rs.offset_);
2199  emit(instr);
2200 }
2201 
2202 
2203 // --------Coprocessor-instructions----------------
2204 
2205 // Load, store, move.
2206 void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
2207  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
2208 }
2209 
2210 
2211 void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
2212  GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
2213 }
2214 
2215 
2216 void Assembler::swc1(FPURegister fd, const MemOperand& src) {
2217  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
2218 }
2219 
2220 
2221 void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
2222  GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
2223 }
2224 
2225 
2226 void Assembler::mtc1(Register rt, FPURegister fs) {
2227  GenInstrRegister(COP1, MTC1, rt, fs, f0);
2228 }
2229 
2230 
2231 void Assembler::mthc1(Register rt, FPURegister fs) {
2232  GenInstrRegister(COP1, MTHC1, rt, fs, f0);
2233 }
2234 
2235 
2236 void Assembler::dmtc1(Register rt, FPURegister fs) {
2237  GenInstrRegister(COP1, DMTC1, rt, fs, f0);
2238 }
2239 
2240 
2241 void Assembler::mfc1(Register rt, FPURegister fs) {
2242  GenInstrRegister(COP1, MFC1, rt, fs, f0);
2243 }
2244 
2245 
2246 void Assembler::mfhc1(Register rt, FPURegister fs) {
2247  GenInstrRegister(COP1, MFHC1, rt, fs, f0);
2248 }
2249 
2250 
2251 void Assembler::dmfc1(Register rt, FPURegister fs) {
2252  GenInstrRegister(COP1, DMFC1, rt, fs, f0);
2253 }
2254 
2255 
2256 void Assembler::ctc1(Register rt, FPUControlRegister fs) {
2257  GenInstrRegister(COP1, CTC1, rt, fs);
2258 }
2259 
2260 
2261 void Assembler::cfc1(Register rt, FPUControlRegister fs) {
2262  GenInstrRegister(COP1, CFC1, rt, fs);
2263 }
2264 
2265 
2266 void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
2267  uint64_t i;
2268  memcpy(&i, &d, 8);
2269 
2270  *lo = i & 0xffffffff;
2271  *hi = i >> 32;
2272 }
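// Usage sketch (illustrative only): splitting the IEEE-754 bit pattern of a
// double into its two 32-bit halves, e.g. 1.0 == 0x3ff0000000000000:
//
//   uint32_t lo, hi;
//   DoubleAsTwoUInt32(1.0, &lo, &hi);  // lo == 0x00000000, hi == 0x3ff00000.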
2273 
2274 
2275 // Arithmetic.
2276 
2277 void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2278  GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
2279 }
2280 
2281 
2282 void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2283  GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
2284 }
2285 
2286 
2287 void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2288  GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
2289 }
2290 
2291 
2292 void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
2293  FPURegister ft) {
2294  GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
2295 }
2296 
2297 
2298 void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2299  GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
2300 }
2301 
2302 
2303 void Assembler::abs_d(FPURegister fd, FPURegister fs) {
2304  GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
2305 }
2306 
2307 
2308 void Assembler::mov_d(FPURegister fd, FPURegister fs) {
2309  GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
2310 }
2311 
2312 
2313 void Assembler::neg_d(FPURegister fd, FPURegister fs) {
2314  GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
2315 }
2316 
2317 
2318 void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
2319  GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
2320 }
2321 
2322 
2323 // Conversions.
2324 
2325 void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
2326  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
2327 }
2328 
2329 
2330 void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
2331  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
2332 }
2333 
2334 
2335 void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
2336  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
2337 }
2338 
2339 
2340 void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
2341  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
2342 }
2343 
2344 
2345 void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
2346  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
2347 }
2348 
2349 
2350 void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
2351  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
2352 }
2353 
2354 
2355 void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
2356  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
2357 }
2358 
2359 
2360 void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
2361  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
2362 }
2363 
2364 
2365 void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
2366  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
2367 }
2368 
2369 
2370 void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
2371  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
2372 }
2373 
2374 
2375 void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
2376  DCHECK(kArchVariant == kMips64r2);
2377  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
2378 }
2379 
2380 
2381 void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
2382  DCHECK(kArchVariant == kMips64r2);
2383  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
2384 }
2385 
2386 
2387 void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
2388  DCHECK(kArchVariant == kMips64r2);
2389  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
2390 }
2391 
2392 
2393 void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
2394  DCHECK(kArchVariant == kMips64r2);
2395  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
2396 }
2397 
2398 
2399 void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
2400  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
2401 }
2402 
2403 
2404 void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
2405  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
2406 }
2407 
2408 
2409 void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
2410  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
2411 }
2412 
2413 
2414 void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
2415  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
2416 }
2417 
2418 
2419 void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
2420  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
2421 }
2422 
2423 
2424 void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
2425  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
2426 }
2427 
2428 
2429 void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister ft,
2430  FPURegister fs) {
2431  DCHECK(kArchVariant == kMips64r6);
2432  DCHECK((fmt == D) || (fmt == S));
2433  GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
2434 }
2435 
2436 
2437 void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister ft,
2438  FPURegister fs) {
2439  DCHECK(kArchVariant == kMips64r6);
2440  DCHECK((fmt == D) || (fmt == S));
2441  GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
2442 }
2443 
2444 
2445 void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister ft,
2446  FPURegister fs) {
2447  DCHECK(kArchVariant == kMips64r6);
2448  DCHECK((fmt == D) || (fmt == S));
2449  GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
2450 }
2451 
2452 
2453 void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister ft,
2454  FPURegister fs) {
2455  DCHECK(kArchVariant == kMips64r6);
2456  DCHECK((fmt == D) || (fmt == S));
2457  GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
2458 }
2459 
2460 
2461 void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
2462  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
2463 }
2464 
2465 
2466 void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
2467  DCHECK(kArchVariant == kMips64r2);
2468  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
2469 }
2470 
2471 
2472 void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
2473  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
2474 }
2475 
2476 
2477 void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
2478  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
2479 }
2480 
2481 
2482 void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
2483  DCHECK(kArchVariant == kMips64r2);
2484  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
2485 }
2486 
2487 
2488 void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
2489  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
2490 }
2491 
2492 
2493 // Conditions for >= MIPSr6.
2494 void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
2495  FPURegister fd, FPURegister fs, FPURegister ft) {
2496  DCHECK(kArchVariant == kMips64r6);
2497  DCHECK((fmt & ~(31 << kRsShift)) == 0);
2498  Instr instr = COP1 | fmt | ft.code() << kFtShift |
2499  fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
2500  emit(instr);
2501 }
2502 
2503 
2504 void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
2505  DCHECK(kArchVariant == kMips64r6);
2506  Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
2507  emit(instr);
2508 }
2509 
2510 
2511 void Assembler::bc1nez(int16_t offset, FPURegister ft) {
2512  DCHECK(kArchVariant == kMips64r6);
2513  Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
2514  emit(instr);
2515 }
2516 
2517 
2518 // Conditions for < MIPSr6.
2519 void Assembler::c(FPUCondition cond, SecondaryField fmt,
2520  FPURegister fs, FPURegister ft, uint16_t cc) {
2521  DCHECK(kArchVariant != kMips64r6);
2522  DCHECK(is_uint3(cc));
2523  DCHECK((fmt & ~(31 << kRsShift)) == 0);
2524  Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift
2525  | cc << 8 | 3 << 4 | cond;
2526  emit(instr);
2527 }
2528 
2529 
2530 void Assembler::fcmp(FPURegister src1, const double src2,
2531  FPUCondition cond) {
2532  DCHECK(src2 == 0.0);
2533  mtc1(zero_reg, f14);
2534  cvt_d_w(f14, f14);
2535  c(cond, D, src1, f14, 0);
2536 }
2537 
2538 
2539 void Assembler::bc1f(int16_t offset, uint16_t cc) {
2540  DCHECK(is_uint3(cc));
2541  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
2542  emit(instr);
2543 }
2544 
2545 
2546 void Assembler::bc1t(int16_t offset, uint16_t cc) {
2547  DCHECK(is_uint3(cc));
2548  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
2549  emit(instr);
2550 }
2551 
2552 
2553 // Debugging.
2554 void Assembler::RecordJSReturn() {
2555  positions_recorder()->WriteRecordedPositions();
2556  CheckBuffer();
2557  RecordRelocInfo(RelocInfo::JS_RETURN);
2558 }
2559 
2560 
2561 void Assembler::RecordDebugBreakSlot() {
2562  positions_recorder()->WriteRecordedPositions();
2563  CheckBuffer();
2564  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
2565 }
2566 
2567 
2568 void Assembler::RecordComment(const char* msg) {
2569  if (FLAG_code_comments) {
2570  CheckBuffer();
2571  RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
2572  }
2573 }
2574 
2575 
2576 int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
2577  Instr instr = instr_at(pc);
2578  DCHECK(IsJ(instr) || IsLui(instr));
2579  if (IsLui(instr)) {
2580  Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
2581  Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
2582  Instr instr_ori2 = instr_at(pc + 3 * Assembler::kInstrSize);
2583  DCHECK(IsOri(instr_ori));
2584  DCHECK(IsOri(instr_ori2));
2585  // TODO(plind): symbolic names for the shifts.
2586  int64_t imm = (instr_lui & static_cast<int64_t>(kImm16Mask)) << 48;
2587  imm |= (instr_ori & static_cast<int64_t>(kImm16Mask)) << 32;
2588  imm |= (instr_ori2 & static_cast<int64_t>(kImm16Mask)) << 16;
2589  // Sign extend address.
2590  imm >>= 16;
2591 
2592  if (imm == kEndOfJumpChain) {
2593  return 0; // Number of instructions patched.
2594  }
2595  imm += pc_delta;
2596  DCHECK((imm & 3) == 0);
2597 
2598  instr_lui &= ~kImm16Mask;
2599  instr_ori &= ~kImm16Mask;
2600  instr_ori2 &= ~kImm16Mask;
2601 
2602  instr_at_put(pc + 0 * Assembler::kInstrSize,
2603  instr_lui | ((imm >> 32) & kImm16Mask));
2604  instr_at_put(pc + 1 * Assembler::kInstrSize,
2605  instr_ori | ((imm >> 16) & kImm16Mask));
2606  instr_at_put(pc + 3 * Assembler::kInstrSize,
2607  instr_ori2 | (imm & kImm16Mask));
2608  return 4; // Number of instructions patched.
2609  } else {
2610  uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
2611  if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
2612  return 0; // Number of instructions patched.
2613  }
2614 
2615  imm28 += pc_delta;
2616  imm28 &= kImm28Mask;
2617  DCHECK((imm28 & 3) == 0);
2618 
2619  instr &= ~kImm26Mask;
2620  uint32_t imm26 = imm28 >> 2;
2621  DCHECK(is_uint26(imm26));
2622 
2623  instr_at_put(pc, instr | (imm26 & kImm26Mask));
2624  return 1; // Number of instructions patched.
2625  }
2626 }
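// Worked example (illustrative only) of the lui/ori/ori decode above: the
// three 16-bit immediates are packed into bits [63:16], and the arithmetic
// right shift by 16 both scales the value back down and sign-extends it:
//
//   imm = (lui16 << 48) | (ori16 << 32) | (ori16_2 << 16);
//   imm >>= 16;  // Canonical, sign-extended 48-bit address.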
2627 
2628 
2629 void Assembler::GrowBuffer() {
2630  if (!own_buffer_) FATAL("external code buffer is too small");
2631 
2632  // Compute new buffer size.
2633  CodeDesc desc; // The new buffer.
2634  if (buffer_size_ < 1 * MB) {
2635  desc.buffer_size = 2 * buffer_size_;
2636  } else {
2637  desc.buffer_size = buffer_size_ + 1 * MB;
2638  }
2639  CHECK_GT(desc.buffer_size, 0); // No overflow.
2640 
2641  // Set up new buffer.
2642  desc.buffer = NewArray<byte>(desc.buffer_size);
2643 
2644  desc.instr_size = pc_offset();
2645  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
2646 
2647  // Copy the data.
2648  intptr_t pc_delta = desc.buffer - buffer_;
2649  intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
2650  (buffer_ + buffer_size_);
2651  MemMove(desc.buffer, buffer_, desc.instr_size);
2652  MemMove(reloc_info_writer.pos() + rc_delta,
2653  reloc_info_writer.pos(), desc.reloc_size);
2654 
2655  // Switch buffers.
2656  DeleteArray(buffer_);
2657  buffer_ = desc.buffer;
2658  buffer_size_ = desc.buffer_size;
2659  pc_ += pc_delta;
2660  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
2661  reloc_info_writer.last_pc() + pc_delta);
2662 
2663  // Relocate runtime entries.
2664  for (RelocIterator it(desc); !it.done(); it.next()) {
2665  RelocInfo::Mode rmode = it.rinfo()->rmode();
2666  if (rmode == RelocInfo::INTERNAL_REFERENCE) {
2667  byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
2668  RelocateInternalReference(p, pc_delta);
2669  }
2670  }
2671 
2672  DCHECK(!overflow());
2673 }
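// Illustration (not in the original source) of the growth policy above: the
// buffer doubles while it is under 1 MB and then grows linearly, e.g.
//
//   256 KB -> 512 KB -> 1 MB -> 2 MB -> 3 MB -> ...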
2674 
2675 
2676 void Assembler::db(uint8_t data) {
2677  CheckBuffer();
2678  *reinterpret_cast<uint8_t*>(pc_) = data;
2679  pc_ += sizeof(uint8_t);
2680 }
2681 
2682 
2683 void Assembler::dd(uint32_t data) {
2684  CheckBuffer();
2685  *reinterpret_cast<uint32_t*>(pc_) = data;
2686  pc_ += sizeof(uint32_t);
2687 }
2688 
2689 
2690 void Assembler::emit_code_stub_address(Code* stub) {
2691  CheckBuffer();
2692  *reinterpret_cast<uint64_t*>(pc_) =
2693  reinterpret_cast<uint64_t>(stub->instruction_start());
2694  pc_ += sizeof(uint64_t);
2695 }
2696 
2697 
2698 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
2699  // We do not try to reuse pool constants.
2700  RelocInfo rinfo(pc_, rmode, data, NULL);
2701  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
2702  // Adjust code for new modes.
2703  DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
2704  || RelocInfo::IsJSReturn(rmode)
2705  || RelocInfo::IsComment(rmode)
2706  || RelocInfo::IsPosition(rmode));
2707  // These modes do not need an entry in the constant pool.
2708  }
2709  if (!RelocInfo::IsNone(rinfo.rmode())) {
2710  // Don't record external references unless the heap will be serialized.
2711  if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
2712  !serializer_enabled() && !emit_debug_code()) {
2713  return;
2714  }
2715  DCHECK(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
2716  if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
2717  RelocInfo reloc_info_with_ast_id(pc_,
2718  rmode,
2719  RecordedAstId().ToInt(),
2720  NULL);
2721  ClearRecordedAstId();
2722  reloc_info_writer.Write(&reloc_info_with_ast_id);
2723  } else {
2724  reloc_info_writer.Write(&rinfo);
2725  }
2726  }
2727 }
2728 
2729 
2730 void Assembler::BlockTrampolinePoolFor(int instructions) {
2731  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
2732 }
2733 
2734 
2735 void Assembler::CheckTrampolinePool() {
2736  // Some small sequences of instructions must not be broken up by the
2737  // insertion of a trampoline pool; such sequences are protected by setting
2738  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
2739  // which are both checked here. Also, recursive calls to CheckTrampolinePool
2740  // are blocked by trampoline_pool_blocked_nesting_.
2741  if ((trampoline_pool_blocked_nesting_ > 0) ||
2742  (pc_offset() < no_trampoline_pool_before_)) {
2743  // Emission is currently blocked; make sure we try again as soon as
2744  // possible.
2745  if (trampoline_pool_blocked_nesting_ > 0) {
2746  next_buffer_check_ = pc_offset() + kInstrSize;
2747  } else {
2748  next_buffer_check_ = no_trampoline_pool_before_;
2749  }
2750  return;
2751  }
2752 
2753  DCHECK(!trampoline_emitted_);
2754  DCHECK(unbound_labels_count_ >= 0);
2755  if (unbound_labels_count_ > 0) {
2756  // First we emit a jump (2 instructions), then we emit the trampoline pool.
2757  { BlockTrampolinePoolScope block_trampoline_pool(this);
2758  Label after_pool;
2759  b(&after_pool);
2760  nop();
2761 
2762  int pool_start = pc_offset();
2763  for (int i = 0; i < unbound_labels_count_; i++) {
2764  uint64_t imm64;
2765  imm64 = jump_address(&after_pool);
2766  { BlockGrowBufferScope block_buf_growth(this);
2767  // Buffer growth (and relocation) must be blocked for internal
2768  // references until associated instructions are emitted and available
2769  // to be patched.
2770  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2771  // TODO(plind): Verify this, presume I cannot use macro-assembler
2772  // here.
2773  lui(at, (imm64 >> 32) & kImm16Mask);
2774  ori(at, at, (imm64 >> 16) & kImm16Mask);
2775  dsll(at, at, 16);
2776  ori(at, at, imm64 & kImm16Mask);
2777  }
2778  jr(at);
2779  nop();
2780  }
2781  bind(&after_pool);
2782  trampoline_ = Trampoline(pool_start, unbound_labels_count_);
2783 
2784  trampoline_emitted_ = true;
2785  // As we are only going to emit the trampoline once, we need to prevent
2786  // any further emission.
2787  next_buffer_check_ = kMaxInt;
2788  }
2789  } else {
2790  // Number of branches to unbound label at this point is zero, so we can
2791  // move next buffer check to maximum.
2792  next_buffer_check_ = pc_offset() +
2793  kMaxBranchOffset - kTrampolineSlotsSize * 16;
2794  }
2795  return;
2796 }
2797 
2798 
2799 Address Assembler::target_address_at(Address pc) {
2800  Instr instr0 = instr_at(pc);
2801  Instr instr1 = instr_at(pc + 1 * kInstrSize);
2802  Instr instr3 = instr_at(pc + 3 * kInstrSize);
2803 
2804  // Interpret the 4 instructions generated by li for the address; see the
2805  // listing in Assembler::set_target_address_at() just below.
2806  if ((GetOpcodeField(instr0) == LUI) && (GetOpcodeField(instr1) == ORI) &&
2807  (GetOpcodeField(instr3) == ORI)) {
2808  // Assemble the 48 bit value.
2809  int64_t addr = static_cast<int64_t>(
2810  ((uint64_t)(GetImmediate16(instr0)) << 32) |
2811  ((uint64_t)(GetImmediate16(instr1)) << 16) |
2812  ((uint64_t)(GetImmediate16(instr3))));
2813 
2814  // Sign extend to get canonical address.
2815  addr = (addr << 16) >> 16;
2816  return reinterpret_cast<Address>(addr);
2817  }
2818  // We should never get here, force a bad address if we do.
2819  UNREACHABLE();
2820  return (Address)0x0;
2821 }
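// Worked example (illustrative only) of the sign extension above: for a
// 48-bit address with bit 47 set, the int64_t shift pair replicates bit 47
// through bits [63:48], producing the canonical form:
//
//   (0x0000800000001234 << 16) >> 16  ==  0xffff800000001234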
2822 
2823 
2824 // MIPS and ia32 use opposite encoding for qNaN and sNaN, such that ia32
2825 // qNaN is a MIPS sNaN, and ia32 sNaN is MIPS qNaN. If running from a heap
2826 // snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
2827 // OS::nan_value() returns a qNaN.
2828 void Assembler::QuietNaN(HeapObject* object) {
2829  HeapNumber::cast(object)->set_value(base::OS::nan_value());
2830 }
2831 
2832 
2833 // On Mips64, a target address is stored in a 4-instruction sequence:
2834 // 0: lui(rd, (j.imm64_ >> 32) & kImm16Mask);
2835 // 1: ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
2836 // 2: dsll(rd, rd, 16);
2837 // 3: ori(rd, rd, j.imm64_ & kImm16Mask);
2838 //
2839 // Patching the address must replace all the lui & ori instructions,
2840 // and flush the i-cache.
2841 //
2842 // There is an optimization below, which emits a nop when the address
2843 // fits in just 16 bits. This is unlikely to help, and should be benchmarked,
2844 // and possibly removed.
2845 void Assembler::set_target_address_at(Address pc,
2846  Address target,
2847  ICacheFlushMode icache_flush_mode) {
2848 // There is an optimization where only 4 instructions are used to load an
2849 // address in code on MIPS64, because only 48 bits of the address are
2850 // effectively used. It relies on the fact that the upper [63:48] bits are
2851 // not used for virtual address translation, and that they have to be set
2852 // according to the value of bit 47 in order to get a canonical address.
2853  Instr instr1 = instr_at(pc + kInstrSize);
2854  uint32_t rt_code = GetRt(instr1);
2855  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
2856  uint64_t itarget = reinterpret_cast<uint64_t>(target);
2857 
2858 #ifdef DEBUG
2859  // Check we have the result from a li macro-instruction.
2860  Instr instr0 = instr_at(pc);
2861  Instr instr3 = instr_at(pc + kInstrSize * 3);
2862  CHECK((GetOpcodeField(instr0) == LUI && GetOpcodeField(instr1) == ORI &&
2863  GetOpcodeField(instr3) == ORI));
2864 #endif
2865 
2866  // Must use 4 instructions to ensure patchable code.
2867  // lui rt, upper-16.
2868  // ori rt, rt, middle-16.
2869  // dsll rt, rt, 16.
2870  // ori rt, rt, lower-16.
2871  *p = LUI | (rt_code << kRtShift) | ((itarget >> 32) & kImm16Mask);
2872  *(p + 1) = ORI | (rt_code << kRtShift) | (rt_code << kRsShift)
2873  | ((itarget >> 16) & kImm16Mask);
2874  *(p + 3) = ORI | (rt_code << kRsShift) | (rt_code << kRtShift)
2875  | (itarget & kImm16Mask);
2876 
2877  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
2878  CpuFeatures::FlushICache(pc, 4 * Assembler::kInstrSize);
2879  }
2880 }
2881 
2882 
2883 void Assembler::JumpLabelToJumpRegister(Address pc) {
2884  // Address pc points to lui/ori instructions.
2885  // Jump to label may follow at pc + 6 * kInstrSize.
2886  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
2887 #ifdef DEBUG
2888  Instr instr1 = instr_at(pc);
2889 #endif
2890  Instr instr2 = instr_at(pc + 1 * kInstrSize);
2891  Instr instr3 = instr_at(pc + 6 * kInstrSize);
2892  bool patched = false;
2893 
2894  if (IsJal(instr3)) {
2895  DCHECK(GetOpcodeField(instr1) == LUI);
2896  DCHECK(GetOpcodeField(instr2) == ORI);
2897 
2898  uint32_t rs_field = GetRt(instr2) << kRsShift;
2899  uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg.
2900  *(p+6) = SPECIAL | rs_field | rd_field | JALR;
2901  patched = true;
2902  } else if (IsJ(instr3)) {
2903  DCHECK(GetOpcodeField(instr1) == LUI);
2904  DCHECK(GetOpcodeField(instr2) == ORI);
2905 
2906  uint32_t rs_field = GetRt(instr2) << kRsShift;
2907  *(p+6) = SPECIAL | rs_field | JR;
2908  patched = true;
2909  }
2910 
2911  if (patched) {
2912  CpuFeatures::FlushICache(pc+6, sizeof(int32_t));
2913  }
2914 }
2915 
2916 
2917 Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
2918  // No out-of-line constant pool support.
2919  DCHECK(!FLAG_enable_ool_constant_pool);
2920  return isolate->factory()->empty_constant_pool_array();
2921 }
2922 
2923 
2924 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
2925  // No out-of-line constant pool support.
2926  DCHECK(!FLAG_enable_ool_constant_pool);
2927  return;
2928 }
2929 
2930 
2931 } } // namespace v8::internal
2932 
2933 #endif // V8_TARGET_ARCH_MIPS64