V8 Project
macro-assembler-mips.h
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
6 #define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
7 
8 #include "src/assembler.h"
9 #include "src/globals.h"
11 
12 namespace v8 {
13 namespace internal {
14 
15 // Forward declaration.
16 class JumpTarget;
17 
18 // Reserved Register Usage Summary.
19 //
20 // Registers t8, t9, and at are reserved for use by the MacroAssembler.
21 //
22 // The programmer should know that the MacroAssembler may clobber these three,
23 // but won't touch other registers except in special cases.
24 //
25 // Per the MIPS ABI, register t9 must be used for indirect function calls
26 // via 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when
27 // trying to update the gp register for position-independent code. Whenever
28 // MIPS generated code calls C code, it must do so via the t9 register.
29 
30 
31 // Flags used for LeaveExitFrame function.
32 enum LeaveExitFrameMode {
33  EMIT_RETURN = true,
34  NO_EMIT_RETURN = false
35 };
36 
37 // Flags used for AllocateHeapNumber
38 enum TaggingMode {
39  // Tag the result.
40  TAG_RESULT,
41  // Don't tag.
42  DONT_TAG_RESULT
43 };
44 
45 // Flags used for the ObjectToDoubleFPURegister function.
46 enum ObjectToDoubleFlags {
47  // No special flags.
48  NO_OBJECT_TO_DOUBLE_FLAGS = 0,
49  // Object is known to be a non smi.
50  OBJECT_NOT_SMI = 1 << 0,
51  // Don't load NaNs or infinities, branch to the non number case instead.
52  AVOID_NANS_AND_INFINITIES = 1 << 1
53 };
54 
55 // Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls.
56 enum BranchDelaySlot {
57  USE_DELAY_SLOT,
58  PROTECT
59 };
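// [Annotation, not part of the original header] Illustrative sketch of how the
// delay-slot flag is typically used in generated code, assuming the usual
// ACCESS_MASM '__' shorthand defined near the end of this file:
//   __ Ret(USE_DELAY_SLOT);
//   __ mov(v0, zero_reg);   // this instruction fills the return's delay slot
// With PROTECT (the default) the MacroAssembler emits the protecting nop itself.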
60 
61 // Flags used for the li macro-assembler function.
62 enum LiFlags {
63  // If the constant value can be represented in just 16 bits, then
64  // optimize the li to use a single instruction, rather than lui/ori pair.
65  OPTIMIZE_SIZE = 0,
66  // Always use 2 instructions (lui/ori pair), even if the constant could
67  // be loaded with just one, so that this value is patchable later.
68  CONSTANT_SIZE = 1
69 };
70 
71 
72 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
73 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
74 enum PointersToHereCheck {
75  kPointersToHereMaybeInteresting,
76  kPointersToHereAreAlwaysInteresting
77 };
78 enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
79 
80 Register GetRegisterThatIsNotOneOf(Register reg1,
81  Register reg2 = no_reg,
82  Register reg3 = no_reg,
83  Register reg4 = no_reg,
84  Register reg5 = no_reg,
85  Register reg6 = no_reg);
86 
87 bool AreAliased(Register reg1,
88  Register reg2,
89  Register reg3 = no_reg,
90  Register reg4 = no_reg,
91  Register reg5 = no_reg,
92  Register reg6 = no_reg,
93  Register reg7 = no_reg,
94  Register reg8 = no_reg);
95 
96 
97 // -----------------------------------------------------------------------------
98 // Static helper functions.
99 
100 inline MemOperand ContextOperand(Register context, int index) {
101  return MemOperand(context, Context::SlotOffset(index));
102 }
103 
104 
105 inline MemOperand GlobalObjectOperand() {
106  return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
107 }
108 
109 
110 // Generate a MemOperand for loading a field from an object.
111 inline MemOperand FieldMemOperand(Register object, int offset) {
112  return MemOperand(object, offset - kHeapObjectTag);
113 }
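// [Annotation] Illustrative sketch: because FieldMemOperand subtracts
// kHeapObjectTag, a tagged pointer can be dereferenced directly, e.g.
//   __ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));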
114 
115 
116 // Generate a MemOperand for storing arguments 5..N on the stack
117 // when calling CallCFunction().
118 inline MemOperand CFunctionArgumentOperand(int index) {
119  DCHECK(index > kCArgSlotCount);
120  // Argument 5 takes the slot just past the four Arg-slots.
121  int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
122  return MemOperand(sp, offset);
123 }
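// [Annotation] Illustrative sketch: after PrepareCallCFunction, arguments
// beyond the four register slots go through this helper, e.g.
//   __ sw(t0, CFunctionArgumentOperand(5));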
124 
125 
126 // MacroAssembler implements a collection of frequently used macros.
127 class MacroAssembler: public Assembler {
128  public:
129  // The isolate parameter can be NULL if the macro assembler should
130  // not use isolate-dependent functionality. In this case, it's the
131  // responsibility of the caller to never invoke such function on the
132  // macro assembler.
133  MacroAssembler(Isolate* isolate, void* buffer, int size);
134 
135  // Arguments macros.
136 #define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
137 #define COND_ARGS cond, r1, r2
138 
139  // Cases when relocation is not needed.
140 #define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
141  void Name(target_type target, BranchDelaySlot bd = PROTECT); \
142  inline void Name(BranchDelaySlot bd, target_type target) { \
143  Name(target, bd); \
144  } \
145  void Name(target_type target, \
146  COND_TYPED_ARGS, \
147  BranchDelaySlot bd = PROTECT); \
148  inline void Name(BranchDelaySlot bd, \
149  target_type target, \
150  COND_TYPED_ARGS) { \
151  Name(target, COND_ARGS, bd); \
152  }
153 
154 #define DECLARE_BRANCH_PROTOTYPES(Name) \
155  DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
156  DECLARE_NORELOC_PROTOTYPE(Name, int16_t)
157 
158  DECLARE_BRANCH_PROTOTYPES(Branch)
159  DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
160  DECLARE_BRANCH_PROTOTYPES(BranchShort)
161 
162 #undef DECLARE_BRANCH_PROTOTYPES
163 #undef COND_TYPED_ARGS
164 #undef COND_ARGS
165 
166 
167  // Jump, Call, and Ret pseudo instructions implementing inter-working.
168 #define COND_ARGS Condition cond = al, Register rs = zero_reg, \
169  const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
170 
171  void Jump(Register target, COND_ARGS);
172  void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
173  void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
174  void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
175  static int CallSize(Register target, COND_ARGS);
176  void Call(Register target, COND_ARGS);
177  static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
178  void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
179  int CallSize(Handle<Code> code,
180  RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
181  TypeFeedbackId ast_id = TypeFeedbackId::None(),
182  COND_ARGS);
183  void Call(Handle<Code> code,
184  RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
185  TypeFeedbackId ast_id = TypeFeedbackId::None(),
186  COND_ARGS);
187  void Ret(COND_ARGS);
188  inline void Ret(BranchDelaySlot bd, Condition cond = al,
189  Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) {
190  Ret(cond, rs, rt, bd);
191  }
192 
193  void Branch(Label* L,
194  Condition cond,
195  Register rs,
196  Heap::RootListIndex index,
197  BranchDelaySlot bdslot = PROTECT);
198 
199 #undef COND_ARGS
200 
201  // Emit code to discard a non-negative number of pointer-sized elements
202  // from the stack, clobbering only the sp register.
203  void Drop(int count,
204  Condition cond = cc_always,
205  Register reg = no_reg,
206  const Operand& op = Operand(no_reg));
207 
208  // Trivial case of DropAndRet that utilizes the delay slot and only emits
209  // 2 instructions.
210  void DropAndRet(int drop);
211 
212  void DropAndRet(int drop,
213  Condition cond,
214  Register reg,
215  const Operand& op);
216 
217  // Swap two registers. If the scratch register is omitted then a slightly
218  // less efficient form using xor instead of mov is emitted.
219  void Swap(Register reg1, Register reg2, Register scratch = no_reg);
220 
221  void Call(Label* target);
222 
223  inline void Move(Register dst, Register src) {
224  if (!dst.is(src)) {
225  mov(dst, src);
226  }
227  }
228 
229  inline void Move(FPURegister dst, FPURegister src) {
230  if (!dst.is(src)) {
231  mov_d(dst, src);
232  }
233  }
234 
235  inline void Move(Register dst_low, Register dst_high, FPURegister src) {
236  mfc1(dst_low, src);
237  Mfhc1(dst_high, src);
238  }
239 
240  inline void FmoveHigh(Register dst_high, FPURegister src) {
241  Mfhc1(dst_high, src);
242  }
243 
244  inline void FmoveLow(Register dst_low, FPURegister src) {
245  mfc1(dst_low, src);
246  }
247 
248  inline void Move(FPURegister dst, Register src_low, Register src_high) {
249  mtc1(src_low, dst);
250  Mthc1(src_high, dst);
251  }
252 
253  // Conditional move.
254  void Move(FPURegister dst, double imm);
255  void Movz(Register rd, Register rs, Register rt);
256  void Movn(Register rd, Register rs, Register rt);
257  void Movt(Register rd, Register rs, uint16_t cc = 0);
258  void Movf(Register rd, Register rs, uint16_t cc = 0);
259 
260  void Clz(Register rd, Register rs);
261 
262  // Jump unconditionally to given label.
263  // We NEED a nop in the branch delay slot, as it is used by v8, for example in
264  // CodeGenerator::ProcessDeferred().
265  // Currently the branch delay slot is filled by the MacroAssembler.
266  // Use rather b(Label) for code generation.
267  void jmp(Label* L) {
268  Branch(L);
269  }
270 
271  void Load(Register dst, const MemOperand& src, Representation r);
272  void Store(Register src, const MemOperand& dst, Representation r);
273 
274  // Load an object from the root table.
275  void LoadRoot(Register destination,
276  Heap::RootListIndex index);
277  void LoadRoot(Register destination,
278  Heap::RootListIndex index,
279  Condition cond, Register src1, const Operand& src2);
280 
281  // Store an object to the root table.
282  void StoreRoot(Register source,
283  Heap::RootListIndex index);
284  void StoreRoot(Register source,
285  Heap::RootListIndex index,
286  Condition cond, Register src1, const Operand& src2);
287 
288  // ---------------------------------------------------------------------------
289  // GC Support
290 
291  void IncrementalMarkingRecordWriteHelper(Register object,
292  Register value,
293  Register address);
294 
295  enum RememberedSetFinalAction {
296  kReturnAtEnd,
297  kFallThroughAtEnd
298  };
299 
300 
301  // Record in the remembered set the fact that we have a pointer to new space
302  // at the address pointed to by the addr register. Only works if addr is not
303  // in new space.
304  void RememberedSetHelper(Register object, // Used for debug code.
305  Register addr,
306  Register scratch,
307  SaveFPRegsMode save_fp,
308  RememberedSetFinalAction and_then);
309 
310  void CheckPageFlag(Register object,
311  Register scratch,
312  int mask,
313  Condition cc,
314  Label* condition_met);
315 
316  void CheckMapDeprecated(Handle<Map> map,
317  Register scratch,
318  Label* if_deprecated);
319 
320  // Check if object is in new space. Jumps if the object is not in new space.
321  // The register scratch can be object itself, but it will be clobbered.
322  void JumpIfNotInNewSpace(Register object,
323  Register scratch,
324  Label* branch) {
325  InNewSpace(object, scratch, ne, branch);
326  }
327 
328  // Check if object is in new space. Jumps if the object is in new space.
329  // The register scratch can be object itself, but scratch will be clobbered.
330  void JumpIfInNewSpace(Register object,
331  Register scratch,
332  Label* branch) {
333  InNewSpace(object, scratch, eq, branch);
334  }
335 
336  // Check if an object has a given incremental marking color.
337  void HasColor(Register object,
338  Register scratch0,
339  Register scratch1,
340  Label* has_color,
341  int first_bit,
342  int second_bit);
343 
344  void JumpIfBlack(Register object,
345  Register scratch0,
346  Register scratch1,
347  Label* on_black);
348 
349  // Checks the color of an object. If the object is already grey or black
350  // then we just fall through, since it is already live. If it is white and
351  // we can determine that it doesn't need to be scanned, then we just mark it
352  // black and fall through. For the rest we jump to the label so the
353  // incremental marker can fix its assumptions.
354  void EnsureNotWhite(Register object,
355  Register scratch1,
356  Register scratch2,
357  Register scratch3,
358  Label* object_is_white_and_not_data);
359 
360  // Detects conservatively whether an object is data-only, i.e. it does not need
361  // to be scanned by the garbage collector.
362  void JumpIfDataObject(Register value,
363  Register scratch,
364  Label* not_data_object);
365 
366  // Notify the garbage collector that we wrote a pointer into an object.
367  // |object| is the object being stored into, |value| is the object being
368  // stored. value and scratch registers are clobbered by the operation.
369  // The offset is the offset from the start of the object, not the offset from
370  // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
371  void RecordWriteField(
372  Register object,
373  int offset,
374  Register value,
375  Register scratch,
376  RAStatus ra_status,
377  SaveFPRegsMode save_fp,
378  RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
379  SmiCheck smi_check = INLINE_SMI_CHECK,
380  PointersToHereCheck pointers_to_here_check_for_value =
381  kPointersToHereMaybeInteresting);
382 
383  // As above, but the offset has the tag presubtracted. For use with
384  // MemOperand(reg, off).
385  inline void RecordWriteContextSlot(
386  Register context,
387  int offset,
388  Register value,
389  Register scratch,
390  RAStatus ra_status,
391  SaveFPRegsMode save_fp,
392  RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
393  SmiCheck smi_check = INLINE_SMI_CHECK,
394  PointersToHereCheck pointers_to_here_check_for_value =
395  kPointersToHereMaybeInteresting) {
396  RecordWriteField(context,
397  offset + kHeapObjectTag,
398  value,
399  scratch,
400  ra_status,
401  save_fp,
402  remembered_set_action,
403  smi_check,
404  pointers_to_here_check_for_value);
405  }
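// [Annotation] Illustrative sketch of a store followed by its write barrier
// (enum value names as reconstructed above):
//   __ sw(value, FieldMemOperand(object, JSObject::kPropertiesOffset));
//   __ RecordWriteField(object, JSObject::kPropertiesOffset, value, scratch,
//                       kRAHasNotBeenSaved, kDontSaveFPRegs);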
406 
407  void RecordWriteForMap(
408  Register object,
409  Register map,
410  Register dst,
411  RAStatus ra_status,
412  SaveFPRegsMode save_fp);
413 
414  // For a given |object| notify the garbage collector that the slot |address|
415  // has been written. |value| is the object being stored. The value and
416  // address registers are clobbered by the operation.
417  void RecordWrite(
418  Register object,
419  Register address,
420  Register value,
421  RAStatus ra_status,
422  SaveFPRegsMode save_fp,
423  RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
424  SmiCheck smi_check = INLINE_SMI_CHECK,
425  PointersToHereCheck pointers_to_here_check_for_value =
426  kPointersToHereMaybeInteresting);
427 
428 
429  // ---------------------------------------------------------------------------
430  // Inline caching support.
431 
432  // Generate code for checking access rights - used for security checks
433  // on access to global objects across environments. The holder register
434  // is left untouched, whereas both scratch registers are clobbered.
435  void CheckAccessGlobalProxy(Register holder_reg,
436  Register scratch,
437  Label* miss);
438 
439  void GetNumberHash(Register reg0, Register scratch);
440 
441  void LoadFromNumberDictionary(Label* miss,
442  Register elements,
443  Register key,
444  Register result,
445  Register reg0,
446  Register reg1,
447  Register reg2);
448 
449 
450  inline void MarkCode(NopMarkerTypes type) {
451  nop(type);
452  }
453 
454  // Check if the given instruction is a 'type' marker.
455  // i.e. check if it is a sll zero_reg, zero_reg, <type> (referenced as
456  // nop(type)). These instructions are generated to mark special location in
457  // the code, like some special IC code.
458  static inline bool IsMarkedCode(Instr instr, int type) {
459  DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
460  return IsNop(instr, type);
461  }
462 
463 
464  static inline int GetCodeMarker(Instr instr) {
465  uint32_t opcode = ((instr & kOpcodeMask));
466  uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
467  uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
468  uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);
469 
470  // Return <n> if we have a sll zero_reg, zero_reg, n
471  // else return -1.
472  bool sllzz = (opcode == SLL &&
473  rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
474  rs == static_cast<uint32_t>(ToNumber(zero_reg)));
475  int type =
476  (sllzz && FIRST_IC_MARKER <= sa && sa < LAST_CODE_MARKER) ? sa : -1;
477  DCHECK((type == -1) ||
478  ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
479  return type;
480  }
481 
482 
483 
484  // ---------------------------------------------------------------------------
485  // Allocation support.
486 
487  // Allocate an object in new space or old pointer space. The object_size is
488  // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
489  // is passed. If the space is exhausted control continues at the gc_required
490  // label. The allocated object is returned in result. If the flag
491  // tag_allocated_object is true the result is tagged as a heap object.
492  // All registers are clobbered also when control continues at the gc_required
493  // label.
494  void Allocate(int object_size,
495  Register result,
496  Register scratch1,
497  Register scratch2,
498  Label* gc_required,
499  AllocationFlags flags);
500 
501  void Allocate(Register object_size,
502  Register result,
503  Register scratch1,
504  Register scratch2,
505  Label* gc_required,
506  AllocationFlags flags);
507 
508  // Undo allocation in new space. The object passed and objects allocated after
509  // it will no longer be allocated. The caller must make sure that no pointers
510  // are left to the object(s) no longer allocated as they would be invalid when
511  // allocation is undone.
512  void UndoAllocationInNewSpace(Register object, Register scratch);
513 
514 
515  void AllocateTwoByteString(Register result,
516  Register length,
517  Register scratch1,
518  Register scratch2,
519  Register scratch3,
520  Label* gc_required);
521  void AllocateOneByteString(Register result, Register length,
522  Register scratch1, Register scratch2,
523  Register scratch3, Label* gc_required);
524  void AllocateTwoByteConsString(Register result,
525  Register length,
526  Register scratch1,
527  Register scratch2,
528  Label* gc_required);
529  void AllocateOneByteConsString(Register result, Register length,
530  Register scratch1, Register scratch2,
531  Label* gc_required);
532  void AllocateTwoByteSlicedString(Register result,
533  Register length,
534  Register scratch1,
535  Register scratch2,
536  Label* gc_required);
537  void AllocateOneByteSlicedString(Register result, Register length,
538  Register scratch1, Register scratch2,
539  Label* gc_required);
540 
541  // Allocates a heap number or jumps to the gc_required label if the young
542  // space is full and a scavenge is needed. All registers are clobbered also
543  // when control continues at the gc_required label.
544  void AllocateHeapNumber(Register result,
545  Register scratch1,
546  Register scratch2,
547  Register heap_number_map,
548  Label* gc_required,
549  TaggingMode tagging_mode = TAG_RESULT,
550  MutableMode mode = IMMUTABLE);
551  void AllocateHeapNumberWithValue(Register result,
552  FPURegister value,
553  Register scratch1,
554  Register scratch2,
555  Label* gc_required);
556 
557  // ---------------------------------------------------------------------------
558  // Instruction macros.
559 
560 #define DEFINE_INSTRUCTION(instr) \
561  void instr(Register rd, Register rs, const Operand& rt); \
562  void instr(Register rd, Register rs, Register rt) { \
563  instr(rd, rs, Operand(rt)); \
564  } \
565  void instr(Register rs, Register rt, int32_t j) { \
566  instr(rs, rt, Operand(j)); \
567  }
568 
569 #define DEFINE_INSTRUCTION2(instr) \
570  void instr(Register rs, const Operand& rt); \
571  void instr(Register rs, Register rt) { \
572  instr(rs, Operand(rt)); \
573  } \
574  void instr(Register rs, int32_t j) { \
575  instr(rs, Operand(j)); \
576  }
577 
578 #define DEFINE_INSTRUCTION3(instr) \
579  void instr(Register rd_hi, Register rd_lo, Register rs, const Operand& rt); \
580  void instr(Register rd_hi, Register rd_lo, Register rs, Register rt) { \
581  instr(rd_hi, rd_lo, rs, Operand(rt)); \
582  } \
583  void instr(Register rd_hi, Register rd_lo, Register rs, int32_t j) { \
584  instr(rd_hi, rd_lo, rs, Operand(j)); \
585  }
586 
599 
602 
608 
611 
612  // MIPS32 R2 instruction macro.
614 
615 #undef DEFINE_INSTRUCTION
616 #undef DEFINE_INSTRUCTION2
617 
618  void Pref(int32_t hint, const MemOperand& rs);
619 
620 
621  // ---------------------------------------------------------------------------
622  // Pseudo-instructions.
623 
624  void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
625 
626  void Ulw(Register rd, const MemOperand& rs);
627  void Usw(Register rd, const MemOperand& rs);
628 
629  // Load int32 in the rd register.
630  void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
631  inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
632  li(rd, Operand(j), mode);
633  }
634  void li(Register rd, Handle<Object> value, LiFlags mode = OPTIMIZE_SIZE);
635 
636  // Push multiple registers on the stack.
637  // Registers are saved in numerical order, with higher numbered registers
638  // saved in higher memory addresses.
639  void MultiPush(RegList regs);
640  void MultiPushReversed(RegList regs);
641 
642  void MultiPushFPU(RegList regs);
643  void MultiPushReversedFPU(RegList regs);
644 
645  void push(Register src) {
646  Addu(sp, sp, Operand(-kPointerSize));
647  sw(src, MemOperand(sp, 0));
648  }
649  void Push(Register src) { push(src); }
650 
651  // Push a handle.
652  void Push(Handle<Object> handle);
653  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
654 
655  // Push two registers. Pushes leftmost register first (to highest address).
656  void Push(Register src1, Register src2) {
657  Subu(sp, sp, Operand(2 * kPointerSize));
658  sw(src1, MemOperand(sp, 1 * kPointerSize));
659  sw(src2, MemOperand(sp, 0 * kPointerSize));
660  }
661 
662  // Push three registers. Pushes leftmost register first (to highest address).
663  void Push(Register src1, Register src2, Register src3) {
664  Subu(sp, sp, Operand(3 * kPointerSize));
665  sw(src1, MemOperand(sp, 2 * kPointerSize));
666  sw(src2, MemOperand(sp, 1 * kPointerSize));
667  sw(src3, MemOperand(sp, 0 * kPointerSize));
668  }
669 
670  // Push four registers. Pushes leftmost register first (to highest address).
671  void Push(Register src1, Register src2, Register src3, Register src4) {
672  Subu(sp, sp, Operand(4 * kPointerSize));
673  sw(src1, MemOperand(sp, 3 * kPointerSize));
674  sw(src2, MemOperand(sp, 2 * kPointerSize));
675  sw(src3, MemOperand(sp, 1 * kPointerSize));
676  sw(src4, MemOperand(sp, 0 * kPointerSize));
677  }
678 
679  void Push(Register src, Condition cond, Register tst1, Register tst2) {
680  // Since we don't have conditional execution we use a Branch.
681  Branch(3, cond, tst1, Operand(tst2));
682  Subu(sp, sp, Operand(kPointerSize));
683  sw(src, MemOperand(sp, 0));
684  }
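// [Annotation] Illustrative sketch: multi-register Push stores the leftmost
// register at the highest address and the matching Pop reverses it, e.g.
//   __ Push(a1, a2);   // a1 ends up at sp + 4, a2 at sp + 0
//   ...
//   __ Pop(a1, a2);    // restores both and pops 2 * kPointerSize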
685 
686  // Pops multiple values from the stack and load them in the
687  // registers specified in regs. Pop order is the opposite as in MultiPush.
688  void MultiPop(RegList regs);
689  void MultiPopReversed(RegList regs);
690 
691  void MultiPopFPU(RegList regs);
692  void MultiPopReversedFPU(RegList regs);
693 
694  void pop(Register dst) {
695  lw(dst, MemOperand(sp, 0));
696  Addu(sp, sp, Operand(kPointerSize));
697  }
698  void Pop(Register dst) { pop(dst); }
699 
700  // Pop two registers. Pops rightmost register first (from lower address).
701  void Pop(Register src1, Register src2) {
702  DCHECK(!src1.is(src2));
703  lw(src2, MemOperand(sp, 0 * kPointerSize));
704  lw(src1, MemOperand(sp, 1 * kPointerSize));
705  Addu(sp, sp, 2 * kPointerSize);
706  }
707 
708  // Pop three registers. Pops rightmost register first (from lower address).
709  void Pop(Register src1, Register src2, Register src3) {
710  lw(src3, MemOperand(sp, 0 * kPointerSize));
711  lw(src2, MemOperand(sp, 1 * kPointerSize));
712  lw(src1, MemOperand(sp, 2 * kPointerSize));
713  Addu(sp, sp, 3 * kPointerSize);
714  }
715 
716  void Pop(uint32_t count = 1) {
717  Addu(sp, sp, Operand(count * kPointerSize));
718  }
719 
720  // Push and pop the registers that can hold pointers, as defined by the
721  // RegList constant kSafepointSavedRegisters.
722  void PushSafepointRegisters();
723  void PopSafepointRegisters();
724  // Store value in register src in the safepoint stack slot for
725  // register dst.
726  void StoreToSafepointRegisterSlot(Register src, Register dst);
727  // Load the value of the src register from its safepoint stack slot
728  // into register dst.
729  void LoadFromSafepointRegisterSlot(Register dst, Register src);
730 
731  // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
732  // from C.
733  // Does not handle errors.
734  void FlushICache(Register address, unsigned instructions);
735 
736  // MIPS32 R2 instruction macro.
737  void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
738  void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
739 
740  // ---------------------------------------------------------------------------
741  // FPU macros. These do not handle special cases like NaN or +- inf.
742 
743  // Convert unsigned word to double.
745  void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
746 
747  // Convert double to unsigned word.
748  void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
749  void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
750 
755 
756  // FP32 mode: Move the general purpose register into
757  // the high part of the double-register pair.
758  // FP64 mode: Move the general-purpose register into
759  // the higher 32 bits of the 64-bit coprocessor register,
760  // while leaving the low bits unchanged.
761  void Mthc1(Register rt, FPURegister fs);
762 
763  // FP32 mode: move the high part of the double-register pair into
764  // general purpose register.
765  // FP64 mode: Move the higher 32 bits of the 64-bit coprocessor register into
766  // general-purpose register.
767  void Mfhc1(Register rt, FPURegister fs);
768 
769  // Wrapper function for the different cmp/branch types.
770  void BranchF(Label* target,
771  Label* nan,
772  Condition cc,
773  FPURegister cmp1,
774  FPURegister cmp2,
775  BranchDelaySlot bd = PROTECT);
776 
777  // Alternate (inline) version for better readability with USE_DELAY_SLOT.
778  inline void BranchF(BranchDelaySlot bd,
779  Label* target,
780  Label* nan,
781  Condition cc,
782  FPURegister cmp1,
783  FPURegister cmp2) {
784  BranchF(target, nan, cc, cmp1, cmp2, bd);
785  }
786 
787  // Truncates a double using a specific rounding mode, and writes the value
788  // to the result register.
789  // The except_flag will contain any exceptions caused by the instruction.
790  // If check_inexact is kDontCheckForInexactConversion, then the inexact
791  // exception is masked.
792  void EmitFPUTruncate(FPURoundingMode rounding_mode,
793  Register result,
794  DoubleRegister double_input,
795  Register scratch,
796  DoubleRegister double_scratch,
797  Register except_flag,
798  CheckForInexactConversion check_inexact
799  = kDontCheckForInexactConversion);
800 
801  // Performs a truncating conversion of a floating point number as used by
802  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
803  // succeeds, otherwise falls through if result is saturated. On return
804  // 'result' either holds answer, or is clobbered on fall through.
805  //
806  // Only public for the test code in test-code-stubs-arm.cc.
807  void TryInlineTruncateDoubleToI(Register result,
808  DoubleRegister input,
809  Label* done);
810 
811  // Performs a truncating conversion of a floating point number as used by
812  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
813  // Exits with 'result' holding the answer.
814  void TruncateDoubleToI(Register result, DoubleRegister double_input);
815 
816  // Performs a truncating conversion of a heap number as used by
817  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
818  // must be different registers. Exits with 'result' holding the answer.
819  void TruncateHeapNumberToI(Register result, Register object);
820 
821  // Converts the smi or heap number in object to an int32 using the rules
822  // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
823  // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
824  // different registers.
826  Register result,
827  Register heap_number_map,
828  Register scratch,
829  Label* not_int32);
830 
831  // Loads the number from object into dst register.
832  // If |object| is neither smi nor heap number, |not_number| is jumped to
833  // with |object| still intact.
834  void LoadNumber(Register object,
835  FPURegister dst,
836  Register heap_number_map,
837  Register scratch,
838  Label* not_number);
839 
840  // Loads the number from object into double_dst in the double format.
841  // Control will jump to not_int32 if the value cannot be exactly represented
842  // by a 32-bit integer.
843  // Floating point values in the 32-bit integer range that are not exact integers
844  // won't be loaded.
845  void LoadNumberAsInt32Double(Register object,
846  DoubleRegister double_dst,
847  Register heap_number_map,
848  Register scratch1,
849  Register scratch2,
850  FPURegister double_scratch,
851  Label* not_int32);
852 
853  // Loads the number from object into dst as a 32-bit integer.
854  // Control will jump to not_int32 if the object cannot be exactly represented
855  // by a 32-bit integer.
856  // Floating point values in the 32-bit integer range that are not exact integers
857  // won't be converted.
858  void LoadNumberAsInt32(Register object,
859  Register dst,
860  Register heap_number_map,
861  Register scratch1,
862  Register scratch2,
863  FPURegister double_scratch0,
864  FPURegister double_scratch1,
865  Label* not_int32);
866 
867  // Enter exit frame.
868  // argc - argument count to be dropped by LeaveExitFrame.
869  // save_doubles - saves FPU registers on stack, currently disabled.
870  // stack_space - extra stack space.
871  void EnterExitFrame(bool save_doubles,
872  int stack_space = 0);
873 
874  // Leave the current exit frame.
875  void LeaveExitFrame(bool save_doubles,
876  Register arg_count,
877  bool restore_context,
878  bool do_return = NO_EMIT_RETURN);
879 
880  // Get the actual activation frame alignment for target environment.
881  static int ActivationFrameAlignment();
882 
883  // Make sure the stack is aligned. Only emits code in debug mode.
884  void AssertStackIsAligned();
885 
886  void LoadContext(Register dst, int context_chain_length);
887 
888  // Conditionally load the cached Array transitioned map of type
889  // transitioned_kind from the native context if the map in register
890  // map_in_out is the cached Array map in the native context of
891  // expected_kind.
892  void LoadTransitionedArrayMapConditional(
893  ElementsKind expected_kind,
894  ElementsKind transitioned_kind,
895  Register map_in_out,
896  Register scratch,
897  Label* no_map_match);
898 
899  void LoadGlobalFunction(int index, Register function);
900 
901  // Load the initial map from the global function. The registers
902  // function and map can be the same, function is then overwritten.
903  void LoadGlobalFunctionInitialMap(Register function,
904  Register map,
905  Register scratch);
906 
907  void InitializeRootRegister() {
908  ExternalReference roots_array_start =
909  ExternalReference::roots_array_start(isolate());
910  li(kRootRegister, Operand(roots_array_start));
911  }
912 
913  // -------------------------------------------------------------------------
914  // JavaScript invokes.
915 
916  // Invoke the JavaScript function code by either calling or jumping.
917  void InvokeCode(Register code,
918  const ParameterCount& expected,
919  const ParameterCount& actual,
920  InvokeFlag flag,
921  const CallWrapper& call_wrapper);
922 
923  // Invoke the JavaScript function in the given register. Changes the
924  // current context to the context in the function before invoking.
925  void InvokeFunction(Register function,
926  const ParameterCount& actual,
927  InvokeFlag flag,
928  const CallWrapper& call_wrapper);
929 
930  void InvokeFunction(Register function,
931  const ParameterCount& expected,
932  const ParameterCount& actual,
933  InvokeFlag flag,
934  const CallWrapper& call_wrapper);
935 
936  void InvokeFunction(Handle<JSFunction> function,
937  const ParameterCount& expected,
938  const ParameterCount& actual,
939  InvokeFlag flag,
940  const CallWrapper& call_wrapper);
941 
942 
943  void IsObjectJSObjectType(Register heap_object,
944  Register map,
945  Register scratch,
946  Label* fail);
947 
948  void IsInstanceJSObjectType(Register map,
949  Register scratch,
950  Label* fail);
951 
952  void IsObjectJSStringType(Register object,
953  Register scratch,
954  Label* fail);
955 
956  void IsObjectNameType(Register object,
957  Register scratch,
958  Label* fail);
959 
960  // -------------------------------------------------------------------------
961  // Debugger Support.
962 
963  void DebugBreak();
964 
965  // -------------------------------------------------------------------------
966  // Exception handling.
967 
968  // Push a new try handler and link into try handler chain.
969  void PushTryHandler(StackHandler::Kind kind, int handler_index);
970 
971  // Unlink the stack handler on top of the stack from the try handler chain.
972  // Must preserve the result register.
973  void PopTryHandler();
974 
975  // Passes thrown value to the handler of top of the try handler chain.
976  void Throw(Register value);
977 
978  // Propagates an uncatchable exception to the top of the current JS stack's
979  // handler chain.
980  void ThrowUncatchable(Register value);
981 
982  // Copies a fixed number of fields of heap objects from src to dst.
983  void CopyFields(Register dst, Register src, RegList temps, int field_count);
984 
985  // Copies a number of bytes from src to dst. All registers are clobbered. On
986  // exit src and dst will point to the place just after where the last byte was
987  // read or written and length will be zero.
988  void CopyBytes(Register src,
989  Register dst,
990  Register length,
991  Register scratch);
992 
993  // Initialize fields with filler values. Fields starting at |start_offset|
994  // not including end_offset are overwritten with the value in |filler|. At
995  // the end of the loop, |start_offset| takes the value of |end_offset|.
996  void InitializeFieldsWithFiller(Register start_offset,
997  Register end_offset,
998  Register filler);
999 
1000  // -------------------------------------------------------------------------
1001  // Support functions.
1002 
1003  // Try to get function prototype of a function and puts the value in
1004  // the result register. Checks that the function really is a
1005  // function and jumps to the miss label if the fast checks fail. The
1006  // function register will be untouched; the other registers may be
1007  // clobbered.
1008  void TryGetFunctionPrototype(Register function,
1009  Register result,
1010  Register scratch,
1011  Label* miss,
1012  bool miss_on_bound_function = false);
1013 
1014  void GetObjectType(Register function,
1015  Register map,
1016  Register type_reg);
1017 
1018  // Check if a map for a JSObject indicates that the object has fast elements.
1019  // Jump to the specified label if it does not.
1020  void CheckFastElements(Register map,
1021  Register scratch,
1022  Label* fail);
1023 
1024  // Check if a map for a JSObject indicates that the object can have both smi
1025  // and HeapObject elements. Jump to the specified label if it does not.
1026  void CheckFastObjectElements(Register map,
1027  Register scratch,
1028  Label* fail);
1029 
1030  // Check if a map for a JSObject indicates that the object has fast smi only
1031  // elements. Jump to the specified label if it does not.
1032  void CheckFastSmiElements(Register map,
1033  Register scratch,
1034  Label* fail);
1035 
1036  // Check to see if maybe_number can be stored as a double in
1037  // FastDoubleElements. If it can, store it at the index specified by key in
1038  // the FastDoubleElements array elements. Otherwise jump to fail.
1039  void StoreNumberToDoubleElements(Register value_reg,
1040  Register key_reg,
1041  Register elements_reg,
1042  Register scratch1,
1043  Register scratch2,
1044  Register scratch3,
1045  Label* fail,
1046  int elements_offset = 0);
1047 
1048  // Compare an object's map with the specified map and its transitioned
1049  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
1050  // "branch_to" if the result of the comparison is "cond". If multiple map
1051  // compares are required, the compare sequences branches to early_success.
1052  void CompareMapAndBranch(Register obj,
1053  Register scratch,
1054  Handle<Map> map,
1055  Label* early_success,
1056  Condition cond,
1057  Label* branch_to);
1058 
1059  // As above, but the map of the object is already loaded into the register
1060  // which is preserved by the code generated.
1061  void CompareMapAndBranch(Register obj_map,
1062  Handle<Map> map,
1063  Label* early_success,
1064  Condition cond,
1065  Label* branch_to);
1066 
1067  // Check if the map of an object is equal to a specified map and branch to
1068  // label if not. Skip the smi check if not required (object is known to be a
1069  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
1070  // against maps that are ElementsKind transition maps of the specified map.
1071  void CheckMap(Register obj,
1072  Register scratch,
1073  Handle<Map> map,
1074  Label* fail,
1075  SmiCheckType smi_check_type);
1076 
1077 
1078  void CheckMap(Register obj,
1079  Register scratch,
1080  Heap::RootListIndex index,
1081  Label* fail,
1082  SmiCheckType smi_check_type);
1083 
1084  // Check if the map of an object is equal to a specified map and branch to a
1085  // specified target if equal. Skip the smi check if not required (object is
1086  // known to be a heap object)
1087  void DispatchMap(Register obj,
1088  Register scratch,
1089  Handle<Map> map,
1090  Handle<Code> success,
1091  SmiCheckType smi_check_type);
1092 
1093 
1094  // Load and check the instance type of an object for being a string.
1095  // Loads the type into the second argument register.
1096  // Returns a condition that will be enabled if the object was a string.
1097  Condition IsObjectStringType(Register obj,
1098  Register type,
1099  Register result) {
1100  lw(type, FieldMemOperand(obj, HeapObject::kMapOffset));
1101  lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
1102  And(type, type, Operand(kIsNotStringMask));
1103  DCHECK_EQ(0, kStringTag);
1104  return eq;
1105  }
1106 
1107 
1108  // Picks out an array index from the hash field.
1109  // Register use:
1110  // hash - holds the index's hash. Clobbered.
1111  // index - holds the overwritten index on exit.
1112  void IndexFromHash(Register hash, Register index);
1113 
1114  // Get the number of least significant bits from a register.
1115  void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
1116  void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
1117 
1118  // Load the value of a number object into a FPU double register. If the
1119  // object is not a number a jump to the label not_number is performed
1120  // and the FPU double register is unchanged.
1121  void ObjectToDoubleFPURegister(
1122  Register object,
1123  FPURegister value,
1124  Register scratch1,
1125  Register scratch2,
1126  Register heap_number_map,
1127  Label* not_number,
1128  ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);
1129 
1130  // Load the value of a smi object into a FPU double register. The register
1131  // scratch1 can be the same register as smi in which case smi will hold the
1132  // untagged value afterwards.
1133  void SmiToDoubleFPURegister(Register smi,
1134  FPURegister value,
1135  Register scratch1);
1136 
1137  // -------------------------------------------------------------------------
1138  // Overflow handling functions.
1139  // Usage: first call the appropriate arithmetic function, then call one of the
1140  // jump functions with the overflow_dst register as the second parameter.
1141 
1142  void AdduAndCheckForOverflow(Register dst,
1143  Register left,
1144  Register right,
1145  Register overflow_dst,
1146  Register scratch = at);
1147 
1148  void AdduAndCheckForOverflow(Register dst, Register left,
1149  const Operand& right, Register overflow_dst,
1150  Register scratch = at);
1151 
1152  void SubuAndCheckForOverflow(Register dst,
1153  Register left,
1154  Register right,
1155  Register overflow_dst,
1156  Register scratch = at);
1157 
1158  void SubuAndCheckForOverflow(Register dst, Register left,
1159  const Operand& right, Register overflow_dst,
1160  Register scratch = at);
1161 
1162  void BranchOnOverflow(Label* label,
1163  Register overflow_check,
1164  BranchDelaySlot bd = PROTECT) {
1165  Branch(label, lt, overflow_check, Operand(zero_reg), bd);
1166  }
1167 
1168  void BranchOnNoOverflow(Label* label,
1169  Register overflow_check,
1170  BranchDelaySlot bd = PROTECT) {
1171  Branch(label, ge, overflow_check, Operand(zero_reg), bd);
1172  }
1173 
1174  void RetOnOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
1175  Ret(lt, overflow_check, Operand(zero_reg), bd);
1176  }
1177 
1178  void RetOnNoOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
1179  Ret(ge, overflow_check, Operand(zero_reg), bd);
1180  }
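// [Annotation] Illustrative sketch of the intended pattern (names as
// reconstructed above): do the arithmetic first, then branch on the recorded
// overflow register, e.g.
//   __ AdduAndCheckForOverflow(v0, a0, a1, t3);
//   __ BranchOnOverflow(&slow_path, t3);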
1181 
1182  // -------------------------------------------------------------------------
1183  // Runtime calls.
1184 
1185  // See comments at the beginning of CEntryStub::Generate.
1186  inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }
1187 
1188  inline void PrepareCEntryFunction(const ExternalReference& ref) {
1189  li(a1, Operand(ref));
1190  }
1191 
1192 #define COND_ARGS Condition cond = al, Register rs = zero_reg, \
1193 const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
1194 
1195  // Call a code stub.
1196  void CallStub(CodeStub* stub,
1197  TypeFeedbackId ast_id = TypeFeedbackId::None(),
1198  COND_ARGS);
1199 
1200  // Tail call a code stub (jump).
1201  void TailCallStub(CodeStub* stub, COND_ARGS);
1202 
1203 #undef COND_ARGS
1204 
1206 
1207  // Call a runtime routine.
1208  void CallRuntime(const Runtime::Function* f,
1209  int num_arguments,
1210  SaveFPRegsMode save_doubles = kDontSaveFPRegs);
1211  void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
1212  const Runtime::Function* function = Runtime::FunctionForId(id);
1213  CallRuntime(function, function->nargs, kSaveFPRegs);
1214  }
1215 
1216  // Convenience function: Same as above, but takes the fid instead.
1217  void CallRuntime(Runtime::FunctionId id,
1218  int num_arguments,
1219  SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
1220  CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
1221  }
1222 
1223  // Convenience function: call an external reference.
1224  void CallExternalReference(const ExternalReference& ext,
1225  int num_arguments,
1226  BranchDelaySlot bd = PROTECT);
1227 
1228  // Tail call of a runtime routine (jump).
1229  // Like JumpToExternalReference, but also takes care of passing the number
1230  // of parameters.
1231  void TailCallExternalReference(const ExternalReference& ext,
1232  int num_arguments,
1233  int result_size);
1234 
1235  // Convenience function: tail call a runtime routine (jump).
1236  void TailCallRuntime(Runtime::FunctionId fid,
1237  int num_arguments,
1238  int result_size);
1239 
1240  int CalculateStackPassedWords(int num_reg_arguments,
1241  int num_double_arguments);
1242 
1243  // Before calling a C-function from generated code, align arguments on stack
1244  // and add space for the four mips argument slots.
1245  // After aligning the frame, non-register arguments must be stored on the
1246  // stack, after the argument-slots using helper: CFunctionArgumentOperand().
1247  // The argument count assumes all arguments are word sized.
1248  // Some compilers/platforms require the stack to be aligned when calling
1249  // C++ code.
1250  // Needs a scratch register to do some arithmetic. This register will be
1251  // trashed.
1252  void PrepareCallCFunction(int num_reg_arguments,
1253  int num_double_registers,
1254  Register scratch);
1255  void PrepareCallCFunction(int num_reg_arguments,
1256  Register scratch);
1257 
1258  // Arguments 1-4 are placed in registers a0 thru a3 respectively.
1259  // Arguments 5..n are stored to stack using following:
1260  // sw(t0, CFunctionArgumentOperand(5));
1261 
1262  // Calls a C function and cleans up the space for arguments allocated
1263  // by PrepareCallCFunction. The called function is not allowed to trigger a
1264  // garbage collection, since that might move the code and invalidate the
1265  // return address (unless this is somehow accounted for by the called
1266  // function).
1267  void CallCFunction(ExternalReference function, int num_arguments);
1268  void CallCFunction(Register function, int num_arguments);
1269  void CallCFunction(ExternalReference function,
1270  int num_reg_arguments,
1271  int num_double_arguments);
1272  void CallCFunction(Register function,
1273  int num_reg_arguments,
1274  int num_double_arguments);
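// [Annotation] Illustrative sketch of a C call with two register arguments;
// 'ref', 'first_arg' and 'second_arg' are placeholders, not real entities:
//   __ PrepareCallCFunction(2, t8);
//   __ mov(a0, first_arg);
//   __ mov(a1, second_arg);
//   __ CallCFunction(ref, 2);   // 'ref' is some ExternalReference to the C entry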
1275  void MovFromFloatResult(DoubleRegister dst);
1276  void MovFromFloatParameter(DoubleRegister dst);
1277 
1278  // There are two ways of passing double arguments on MIPS, depending on
1279  // whether soft or hard floating point ABI is used. These functions
1280  // abstract parameter passing for the three different ways we call
1281  // C functions from generated code.
1282  void MovToFloatParameter(DoubleRegister src);
1283  void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
1284  void MovToFloatResult(DoubleRegister src);
1285 
1286  // Calls an API function. Allocates HandleScope, extracts returned value
1287  // from handle and propagates exceptions. Restores context. stack_space
1288  // - space to be unwound on exit (includes the call JS arguments space and
1289  // the additional space allocated for the fast call).
1290  void CallApiFunctionAndReturn(Register function_address,
1291  ExternalReference thunk_ref,
1292  int stack_space,
1293  MemOperand return_value_operand,
1294  MemOperand* context_restore_operand);
1295 
1296  // Jump to the builtin routine.
1297  void JumpToExternalReference(const ExternalReference& builtin,
1298  BranchDelaySlot bd = PROTECT);
1299 
1300  // Invoke specified builtin JavaScript function. Adds an entry to
1301  // the unresolved list if the name does not resolve.
1302  void InvokeBuiltin(Builtins::JavaScript id,
1303  InvokeFlag flag,
1304  const CallWrapper& call_wrapper = NullCallWrapper());
1305 
1306  // Store the code object for the given builtin in the target register and
1307  // setup the function in a1.
1308  void GetBuiltinEntry(Register target, Builtins::JavaScript id);
1309 
1310  // Store the function for the given builtin in the target register.
1311  void GetBuiltinFunction(Register target, Builtins::JavaScript id);
1312 
1313  struct Unresolved {
1314  int pc;
1315  uint32_t flags; // See Bootstrapper::FixupFlags decoders/encoders.
1316  const char* name;
1317  };
1318 
1319  Handle<Object> CodeObject() {
1320  DCHECK(!code_object_.is_null());
1321  return code_object_;
1322  }
1323 
1324  // Emit code for a truncating division by a constant. The dividend register is
1325  // unchanged and at gets clobbered. Dividend and result must be different.
1326  void TruncatingDiv(Register result, Register dividend, int32_t divisor);
1327 
1328  // -------------------------------------------------------------------------
1329  // StatsCounter support.
1330 
1331  void SetCounter(StatsCounter* counter, int value,
1332  Register scratch1, Register scratch2);
1333  void IncrementCounter(StatsCounter* counter, int value,
1334  Register scratch1, Register scratch2);
1335  void DecrementCounter(StatsCounter* counter, int value,
1336  Register scratch1, Register scratch2);
1337 
1338 
1339  // -------------------------------------------------------------------------
1340  // Debugging.
1341 
1342  // Calls Abort(msg) if the condition cc is not satisfied.
1343  // Use --debug_code to enable.
1344  void Assert(Condition cc, BailoutReason reason, Register rs, Operand rt);
1345  void AssertFastElements(Register elements);
1346 
1347  // Like Assert(), but always enabled.
1348  void Check(Condition cc, BailoutReason reason, Register rs, Operand rt);
1349 
1350  // Print a message to stdout and abort execution.
1351  void Abort(BailoutReason msg);
1352 
1353  // Verify restrictions about code generated in stubs.
1354  void set_generating_stub(bool value) { generating_stub_ = value; }
1355  bool generating_stub() { return generating_stub_; }
1356  void set_has_frame(bool value) { has_frame_ = value; }
1357  bool has_frame() { return has_frame_; }
1358  inline bool AllowThisStubCall(CodeStub* stub);
1359 
1360  // ---------------------------------------------------------------------------
1361  // Number utilities.
1362 
1363  // Check whether the value of reg is a power of two and not zero. If not
1364  // control continues at the label not_power_of_two. If reg is a power of two
1365  // the register scratch contains the value of (reg - 1) when control falls
1366  // through.
1367  void JumpIfNotPowerOfTwoOrZero(Register reg,
1368  Register scratch,
1369  Label* not_power_of_two_or_zero);
1370 
1371  // -------------------------------------------------------------------------
1372  // Smi utilities.
1373 
1374  void SmiTag(Register reg) {
1375  Addu(reg, reg, reg);
1376  }
1377 
1378  // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
1379  void SmiTagCheckOverflow(Register reg, Register overflow);
1380  void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
1381 
1382  void SmiTag(Register dst, Register src) {
1383  Addu(dst, src, src);
1384  }
1385 
1386  // Try to convert int32 to smi. If the value is too large, preserve
1387  // the original value and jump to not_a_smi. Destroys scratch and
1388  // sets flags.
1389  void TrySmiTag(Register reg, Register scratch, Label* not_a_smi) {
1390  TrySmiTag(reg, reg, scratch, not_a_smi);
1391  }
1392  void TrySmiTag(Register dst,
1393  Register src,
1394  Register scratch,
1395  Label* not_a_smi) {
1396  SmiTagCheckOverflow(at, src, scratch);
1397  BranchOnOverflow(not_a_smi, scratch);
1398  mov(dst, at);
1399  }
1400 
1401  void SmiUntag(Register reg) {
1402  sra(reg, reg, kSmiTagSize);
1403  }
1404 
1405  void SmiUntag(Register dst, Register src) {
1406  sra(dst, src, kSmiTagSize);
1407  }
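// [Annotation] Illustrative sketch: with kSmiTag == 0 and kSmiTagSize == 1 on
// 32-bit MIPS, a smi is the value shifted left by one, so
//   __ SmiTag(t0);     // t0 = t0 + t0  (i.e. t0 << 1)
//   __ SmiUntag(t0);   // t0 = t0 >> 1  (arithmetic shift)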
1408 
1409  // Test if the register contains a smi.
1410  inline void SmiTst(Register value, Register scratch) {
1411  And(scratch, value, Operand(kSmiTagMask));
1412  }
1413  inline void NonNegativeSmiTst(Register value, Register scratch) {
1414  And(scratch, value, Operand(kSmiTagMask | kSmiSignMask));
1415  }
1416 
1417  // Untag the source value into destination and jump if source is a smi.
1418  // Source and destination can be the same register.
1419  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
1420 
1421  // Untag the source value into destination and jump if source is not a smi.
1422  // Source and destination can be the same register.
1423  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
1424 
1425  // Jump if the register contains a smi.
1426  void JumpIfSmi(Register value,
1427  Label* smi_label,
1428  Register scratch = at,
1429  BranchDelaySlot bd = PROTECT);
1430 
1431  // Jump if the register contains a non-smi.
1432  void JumpIfNotSmi(Register value,
1433  Label* not_smi_label,
1434  Register scratch = at,
1435  BranchDelaySlot bd = PROTECT);
1436 
1437  // Jump if either of the registers contain a non-smi.
1438  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
1439  // Jump if either of the registers contain a smi.
1440  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
1441 
1442  // Abort execution if argument is a smi, enabled via --debug-code.
1443  void AssertNotSmi(Register object);
1444  void AssertSmi(Register object);
1445 
1446  // Abort execution if argument is not a string, enabled via --debug-code.
1447  void AssertString(Register object);
1448 
1449  // Abort execution if argument is not a name, enabled via --debug-code.
1450  void AssertName(Register object);
1451 
1452  // Abort execution if argument is not undefined or an AllocationSite, enabled
1453  // via --debug-code.
1454  void AssertUndefinedOrAllocationSite(Register object, Register scratch);
1455 
1456  // Abort execution if reg is not the root value with the given index,
1457  // enabled via --debug-code.
1458  void AssertIsRoot(Register reg, Heap::RootListIndex index);
1459 
1460  // ---------------------------------------------------------------------------
1461  // HeapNumber utilities.
1462 
1463  void JumpIfNotHeapNumber(Register object,
1464  Register heap_number_map,
1465  Register scratch,
1466  Label* on_not_heap_number);
1467 
1468  // -------------------------------------------------------------------------
1469  // String utilities.
1470 
1471  // Generate code to do a lookup in the number string cache. If the number in
1472  // the register object is found in the cache the generated code falls through
1473  // with the result in the result register. The object and the result register
1474  // can be the same. If the number is not found in the cache the code jumps to
1475  // the label not_found with only the content of register object unchanged.
1476  void LookupNumberStringCache(Register object,
1477  Register result,
1478  Register scratch1,
1479  Register scratch2,
1480  Register scratch3,
1481  Label* not_found);
1482 
1483  // Checks if both instance types are sequential ASCII strings and jumps to
1484  // label if either is not.
1485  void JumpIfBothInstanceTypesAreNotSequentialOneByte(
1486  Register first_object_instance_type, Register second_object_instance_type,
1487  Register scratch1, Register scratch2, Label* failure);
1488 
1489  // Check if instance type is sequential one-byte string and jump to label if
1490  // it is not.
1491  void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
1492  Label* failure);
1493 
1494  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
1495 
1496  void EmitSeqStringSetCharCheck(Register string,
1497  Register index,
1498  Register value,
1499  Register scratch,
1500  uint32_t encoding_mask);
1501 
1502  // Checks if both objects are sequential one-byte strings and jumps to label
1503  // if either is not. Assumes that neither object is a smi.
1504  void JumpIfNonSmisNotBothSequentialOneByteStrings(Register first,
1505  Register second,
1506  Register scratch1,
1507  Register scratch2,
1508  Label* failure);
1509 
1510  // Checks if both objects are sequential one-byte strings and jumps to label
1511  // if either is not.
1512  void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
1513  Register scratch1,
1514  Register scratch2,
1515  Label* not_flat_one_byte_strings);
1516 
1517  void ClampUint8(Register output_reg, Register input_reg);
1518 
1519  void ClampDoubleToUint8(Register result_reg,
1520  DoubleRegister input_reg,
1521  DoubleRegister temp_double_reg);
1522 
1523 
1524  void LoadInstanceDescriptors(Register map, Register descriptors);
1525  void EnumLength(Register dst, Register map);
1526  void NumberOfOwnDescriptors(Register dst, Register map);
1527 
1528  template<typename Field>
1529  void DecodeField(Register dst, Register src) {
1530  Ext(dst, src, Field::kShift, Field::kSize);
1531  }
1532 
1533  template<typename Field>
1534  void DecodeField(Register reg) {
1535  DecodeField<Field>(reg, reg);
1536  }
1537 
1538  template<typename Field>
1539  void DecodeFieldToSmi(Register dst, Register src) {
1540  static const int shift = Field::kShift;
1541  static const int mask = Field::kMask >> shift << kSmiTagSize;
1542  STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0);
1543  STATIC_ASSERT(kSmiTag == 0);
1544  if (shift < kSmiTagSize) {
1545  sll(dst, src, kSmiTagSize - shift);
1546  And(dst, dst, Operand(mask));
1547  } else if (shift > kSmiTagSize) {
1548  srl(dst, src, shift - kSmiTagSize);
1549  And(dst, dst, Operand(mask));
1550  } else {
1551  And(dst, src, Operand(mask));
1552  }
1553  }
1554 
1555  template<typename Field>
1556  void DecodeFieldToSmi(Register reg) {
1557  DecodeField<Field>(reg, reg);
1558  }
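// [Annotation] Illustrative sketch: DecodeField extracts a BitField-encoded
// value with a single Ext. For a hypothetical field type
// 'class KindField : public BitField<int, 3, 4> {}':
//   __ DecodeField<KindField>(t0, t1);   // t0 = (t1 >> 3) & 0xf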
1559 
1560  // Generates function and stub prologue code.
1561  void StubPrologue();
1562  void Prologue(bool code_pre_aging);
1563 
1564  // Activation support.
1565  void EnterFrame(StackFrame::Type type);
1566  void LeaveFrame(StackFrame::Type type);
1567 
1568  // Patch the relocated value (lui/ori pair).
1569  void PatchRelocatedValue(Register li_location,
1570  Register scratch,
1571  Register new_value);
1572  // Get the relocated value (loaded data) from the lui/ori pair.
1573  void GetRelocatedValue(Register li_location,
1574  Register value,
1575  Register scratch);
1576 
1577  // Expects object in a0 and returns map with validated enum cache
1578  // in a0. Assumes that any other register can be used as a scratch.
1579  void CheckEnumCache(Register null_value, Label* call_runtime);
1580 
1581  // AllocationMemento support. Arrays may have an associated
1582  // AllocationMemento object that can be checked for in order to pretransition
1583  // to another type.
1584  // On entry, receiver_reg should point to the array object.
1585  // scratch_reg gets clobbered.
1586  // If allocation info is present, jump to allocation_memento_present.
1587  void TestJSArrayForAllocationMemento(
1588  Register receiver_reg,
1589  Register scratch_reg,
1590  Label* no_memento_found,
1591  Condition cond = al,
1592  Label* allocation_memento_present = NULL);
1593 
1594  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
1595  Register scratch_reg,
1596  Label* memento_found) {
1597  Label no_memento_found;
1598  TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
1599  &no_memento_found, eq, memento_found);
1600  bind(&no_memento_found);
1601  }
1602 
1603  // Jumps to found label if a prototype map has dictionary elements.
1604  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
1605  Register scratch1, Label* found);
1606 
1607  private:
1608  void CallCFunctionHelper(Register function,
1609  int num_reg_arguments,
1610  int num_double_arguments);
1611 
1614  const Operand& rt,
1615  BranchDelaySlot bdslot = PROTECT);
1617  void BranchAndLinkShort(Label* L, Condition cond, Register rs,
1618  const Operand& rt,
1619  BranchDelaySlot bdslot = PROTECT);
1620  void J(Label* L, BranchDelaySlot bdslot);
1621  void Jr(Label* L, BranchDelaySlot bdslot);
1622  void Jalr(Label* L, BranchDelaySlot bdslot);
1623 
1624  // Helper functions for generating invokes.
1625  void InvokePrologue(const ParameterCount& expected,
1626  const ParameterCount& actual,
1627  Handle<Code> code_constant,
1628  Register code_reg,
1629  Label* done,
1630  bool* definitely_mismatches,
1631  InvokeFlag flag,
1632  const CallWrapper& call_wrapper);
1633 
1634  // Get the code for the given builtin. Returns if able to resolve
1635  // the function in the 'resolved' flag.
1637 
1638  void InitializeNewString(Register string,
1639  Register length,
1640  Heap::RootListIndex map_index,
1641  Register scratch1,
1642  Register scratch2);
1643 
1644  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
1645  void InNewSpace(Register object,
1646  Register scratch,
1647  Condition cond, // eq for new space, ne otherwise.
1648  Label* branch);
1649 
1650  // Helper for finding the mark bits for an address. Afterwards, the
1651  // bitmap register points at the word with the mark bits and the mask
1652  // the position of the first bit. Leaves addr_reg unchanged.
1653  inline void GetMarkBits(Register addr_reg,
1654  Register bitmap_reg,
1655  Register mask_reg);
1656 
1657  // Helper for throwing exceptions. Compute a handler address and jump to
1658  // it. See the implementation for register usage.
1659  void JumpToHandlerEntry();
1660 
1661  // Compute memory operands for safepoint stack slots.
1662  static int SafepointRegisterStackIndex(int reg_code);
1663  MemOperand SafepointRegisterSlot(Register reg);
1664  MemOperand SafepointRegistersAndDoublesSlot(Register reg);
1665 
1666  bool generating_stub_;
1667  bool has_frame_;
1668  // This handle will be patched with the code object on installation.
1669  Handle<Object> code_object_;
1670 
1671  // Needs access to SafepointRegisterStackIndex for compiled frame
1672  // traversal.
1673  friend class StandardFrame;
1674 };
1675 
1676 
1677 // The code patcher is used to patch (typically) small parts of code e.g. for
1678 // debugging and other types of instrumentation. When using the code patcher
1679 // the exact number of bytes specified must be emitted. It is not legal to emit
1680 // relocation information. If any of these constraints are violated it causes
1681 // an assertion to fail.
1682 class CodePatcher {
1683  public:
1684  enum FlushICache {
1685  FLUSH,
1686  DONT_FLUSH
1687  };
1688 
1689  CodePatcher(byte* address,
1690  int instructions,
1691  FlushICache flush_cache = FLUSH);
1692  virtual ~CodePatcher();
1693 
1694  // Macro assembler to emit code.
1695  MacroAssembler* masm() { return &masm_; }
1696 
1697  // Emit an instruction directly.
1698  void Emit(Instr instr);
1699 
1700  // Emit an address directly.
1701  void Emit(Address addr);
1702 
1703  // Change the condition part of an instruction leaving the rest of the current
1704  // instruction unchanged.
1705  void ChangeBranchCondition(Condition cond);
1706 
1707  private:
1708  byte* address_; // The address of the code being patched.
1709  int size_; // Number of bytes of the expected patch size.
1710  MacroAssembler masm_; // Macro assembler used to generate the code.
1711  FlushICache flush_cache_; // Whether to flush the I cache after patching.
1712 };
1713 
1714 
1715 
1716 #ifdef GENERATED_CODE_COVERAGE
1717 #define CODE_COVERAGE_STRINGIFY(x) #x
1718 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
1719 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
1720 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
1721 #else
1722 #define ACCESS_MASM(masm) masm->
1723 #endif
1724 
1725 } } // namespace v8::internal
1726 
1727 #endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
void li(Register rd, int32_t j, LiFlags mode=OPTIMIZE_SIZE)
void CallRuntime(Runtime::FunctionId id, int num_arguments, SaveFPRegsMode save_doubles=kDontSaveFPRegs)
void Movz(Register rd, Register rs, Register rt)
void PatchRelocatedValue(Register li_location, Register scratch, Register new_value)
void ClampUint8(Register output_reg, Register input_reg)
void JumpIfNotUniqueNameInstanceType(Register reg, Label *not_unique_name)
void MultiPushFPU(RegList regs)
void UndoAllocationInNewSpace(Register object, Register scratch)
void CallRuntime(const Runtime::Function *f, int num_arguments, SaveFPRegsMode save_doubles=kDontSaveFPRegs)
void Call(Handle< Code > code, RelocInfo::Mode rmode=RelocInfo::CODE_TARGET, TypeFeedbackId ast_id=TypeFeedbackId::None(), COND_ARGS)
void Move(Register dst, Register src)
void EnsureNotWhite(Register object, Register scratch1, Register scratch2, Register scratch3, Label *object_is_white_and_not_data)
void InvokeFunction(Register function, const ParameterCount &expected, const ParameterCount &actual, InvokeFlag flag, const CallWrapper &call_wrapper)
void Jr(Label *L, BranchDelaySlot bdslot)
void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS)
void StoreRoot(Register source, Heap::RootListIndex index, Condition cond, Register src1, const Operand &src2)
void JumpIfInNewSpace(Register object, Register scratch, Label *branch)
void RecordWriteField(Register object, int offset, Register value, Register scratch, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action=EMIT_REMEMBERED_SET, SmiCheck smi_check=INLINE_SMI_CHECK, PointersToHereCheck pointers_to_here_check_for_value=kPointersToHereMaybeInteresting)
Condition IsObjectStringType(Register obj, Register type, Register result)
void InNewSpace(Register object, Register scratch, Condition cond, Label *branch)
static bool IsMarkedCode(Instr instr, int type)
void mov(Register rd, Register rt)
void CheckMapDeprecated(Handle< Map > map, Register scratch, Label *if_deprecated)
void TryGetFunctionPrototype(Register function, Register result, Register scratch, Label *miss, bool miss_on_bound_function=false)
void SmiTagCheckOverflow(Register dst, Register src, Register overflow)
void UntagAndJumpIfNotSmi(Register dst, Register src, Label *non_smi_case)
static int ActivationFrameAlignment()
void AllocateTwoByteString(Register result, Register length, Register scratch1, Register scratch2, Register scratch3, Label *gc_required)
void PrepareCallCFunction(int num_reg_arguments, Register scratch)
void MultiPush(RegList regs)
void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, const CallWrapper &call_wrapper=NullCallWrapper())
void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot=PROTECT)
void JumpIfNotInNewSpace(Register object, Register scratch, Label *branch)
void Jump(Register target, COND_ARGS)
void FmoveHigh(Register dst_high, FPURegister src)
void EnumLength(Register dst, Register map)
void Ror(const Register &rd, const Register &rs, unsigned shift)
void BranchF(BranchDelaySlot bd, Label *target, Label *nan, Condition cc, FPURegister cmp1, FPURegister cmp2)
void SmiUntag(Register dst, Register src)
void Move(FPURegister dst, Register src_low, Register src_high)
void CallJSExitStub(CodeStub *stub)
void MultiPopReversed(RegList regs)
void AllocateOneByteSlicedString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
void RecordWriteContextSlot(Register context, int offset, Register value, Register scratch, RAStatus ra_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action=EMIT_REMEMBERED_SET, SmiCheck smi_check=INLINE_SMI_CHECK, PointersToHereCheck pointers_to_here_check_for_value=kPointersToHereMaybeInteresting)
static int SafepointRegisterStackIndex(int reg_code)
void Trunc_w_d(FPURegister fd, FPURegister fs)
Handle< Code > ResolveBuiltin(Builtins::JavaScript id, bool *resolved)
void NonNegativeSmiTst(Register value, Register scratch)
STATIC_ASSERT((reg_zero==(reg_not_zero ^ 1)) &&(reg_bit_clear==(reg_bit_set ^ 1)) &&(always==(never ^ 1)))
void LoadGlobalFunctionInitialMap(Register function, Register map, Register scratch)
void CheckEnumCache(Register null_value, Label *call_runtime)
void TailCallStub(CodeStub *stub, COND_ARGS)
void JumpToExternalReference(const ExternalReference &builtin, BranchDelaySlot bd=PROTECT)
void JumpIfNotSmi(Register value, Label *not_smi_label, Register scratch=at, BranchDelaySlot bd=PROTECT)
static int GetCodeMarker(Instr instr)
void Assert(Condition cc, BailoutReason reason, Register rs, Operand rt)
void Throw(Register value)
void Allocate(int object_size, Register result, Register scratch1, Register scratch2, Label *gc_required, AllocationFlags flags)
void SubuAndCheckForOverflow(Register dst, Register left, const Operand &right, Register overflow_dst, Register scratch=at)
void StoreRoot(Register source, Heap::RootListIndex index)
void Branch(Label *L, Condition cond, Register rs, Heap::RootListIndex index, BranchDelaySlot bdslot=PROTECT)
void Push(Register src1, Register src2, Register src3)
void Call(Address target, RelocInfo::Mode rmode, COND_ARGS)
void MultiPopFPU(RegList regs)
void IsInstanceJSObjectType(Register map, Register scratch, Label *fail)
void Drop(int count, Condition cond=cc_always, Register reg=no_reg, const Operand &op=Operand(no_reg))
void Allocate(Register object_size, Register result, Register scratch1, Register scratch2, Label *gc_required, AllocationFlags flags)
void AssertString(Register object)
void SmiTag(Register dst, Register src)
void CheckFastObjectElements(Register map, Register scratch, Label *fail)
void StoreToSafepointRegisterSlot(Register src, Register dst)
void AssertFastElements(Register elements)
void AllocateTwoByteConsString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
bool AllowThisStubCall(CodeStub *stub)
void JumpIfEitherSmi(Register reg1, Register reg2, Label *on_either_smi)
void Abort(BailoutReason msg)
void CallApiFunctionAndReturn(Register function_address, ExternalReference thunk_ref, int stack_space, MemOperand return_value_operand, MemOperand *context_restore_operand)
void IncrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void DropAndRet(int drop, Condition cond, Register reg, const Operand &op)
void TruncateNumberToI(Register object, Register result, Register heap_number_map, Register scratch, Label *not_int32)
void MovFromFloatParameter(DoubleRegister dst)
void IndexFromHash(Register hash, Register index)
void GetNumberHash(Register reg0, Register scratch)
void GetBuiltinFunction(Register target, Builtins::JavaScript id)
void LeaveFrame(StackFrame::Type type)
void li(Register rd, Operand j, LiFlags mode=OPTIMIZE_SIZE)
void CallExternalReference(const ExternalReference &ext, int num_arguments, BranchDelaySlot bd=PROTECT)
void Movt(Register rd, Register rs, uint16_t cc=0)
void And(Register dst, Register src1, const Operand &src2, Condition cond=al)
void Mthc1(Register rt, FPURegister fs)
void BranchAndLinkShort(Label *L, Condition cond, Register rs, const Operand &rt, BranchDelaySlot bdslot=PROTECT)
void TailCallExternalReference(const ExternalReference &ext, int num_arguments, int result_size)
void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch)
void InvokeFunction(Register function, const ParameterCount &actual, InvokeFlag flag, const CallWrapper &call_wrapper)
void RetOnOverflow(Register overflow_check, BranchDelaySlot bd=PROTECT)
void StoreNumberToDoubleElements(Register value_reg, Register key_reg, Register elements_reg, Register scratch1, Register scratch2, Register scratch3, Label *fail, int elements_offset=0)
void CheckAccessGlobalProxy(Register holder_reg, Register scratch, Label *miss)
void DispatchMap(Register obj, Register scratch, Handle< Map > map, Handle< Code > success, SmiCheckType smi_check_type)
static int CallSize(Register target, COND_ARGS)
void MultiPop(RegList regs)
void BranchOnOverflow(Label *label, Register overflow_check, BranchDelaySlot bd=PROTECT)
void JumpIfNotBothSmi(Register reg1, Register reg2, Label *on_not_both_smi)
void InvokeCode(Register code, const ParameterCount &expected, const ParameterCount &actual, InvokeFlag flag, const CallWrapper &call_wrapper)
void FmoveLow(Register dst_low, FPURegister src)
void MovToFloatParameter(DoubleRegister src)
void Push(Register src, Condition cond, Register tst1, Register tst2)
void SmiTagCheckOverflow(Register reg, Register overflow)
void Usw(Register rd, const MemOperand &rs)
void MovToFloatResult(DoubleRegister src)
void SubuAndCheckForOverflow(Register dst, Register left, Register right, Register overflow_dst, Register scratch=at)
void TrySmiTag(Register reg, Register scratch, Label *not_a_smi)
void Pop(Register src1, Register src2)
void CallCFunctionHelper(Register function, int num_reg_arguments, int num_double_arguments)
void LoadInstanceDescriptors(Register map, Register descriptors)
void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch)
void Ceil_w_d(FPURegister fd, FPURegister fs)
void CallCFunction(ExternalReference function, int num_reg_arguments, int num_double_arguments)
void Load(Register dst, const MemOperand &src, Representation r)
void DecrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void Ext(Register rt, Register rs, uint16_t pos, uint16_t size)
void CheckMap(Register obj, Register scratch, Heap::RootListIndex index, Label *fail, SmiCheckType smi_check_type)
void UntagAndJumpIfSmi(Register dst, Register src, Label *smi_case)
void CallCFunction(Register function, int num_arguments)
void LoadFromNumberDictionary(Label *miss, Register elements, Register key, Register result, Register reg0, Register reg1, Register reg2)
void TailCallRuntime(Runtime::FunctionId fid, int num_arguments, int result_size)
void PrepareCEntryFunction(const ExternalReference &ref)
void AllocateTwoByteSlicedString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
void Ins(Register rt, Register rs, uint16_t pos, uint16_t size)
void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch, Label *failure)
void Jump(Handle< Code > code, RelocInfo::Mode rmode, COND_ARGS)
void DecodeFieldToSmi(Register dst, Register src)
void SmiTst(Register value, Register scratch)
void LookupNumberStringCache(Register object, Register result, Register scratch1, Register scratch2, Register scratch3, Label *not_found)
void Prologue(bool code_pre_aging)
void CheckFastElements(Register map, Register scratch, Label *fail)
void SetCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void GetMarkBits(Register addr_reg, Register bitmap_reg, Register mask_reg)
void BranchOnNoOverflow(Label *label, Register overflow_check, BranchDelaySlot bd=PROTECT)
void TestJSArrayForAllocationMemento(Register receiver_reg, Register scratch_reg, Label *no_memento_found, Condition cond=al, Label *allocation_memento_present=NULL)
void TruncatingDiv(Register result, Register dividend, int32_t divisor)
void CallStub(CodeStub *stub, TypeFeedbackId ast_id=TypeFeedbackId::None(), COND_ARGS)
void EnterExitFrame(bool save_doubles, int stack_space=0)
MacroAssembler(Isolate *isolate, void *buffer, int size)
MemOperand SafepointRegisterSlot(Register reg)
void J(Label *L, BranchDelaySlot bdslot)
void LoadFromSafepointRegisterSlot(Register dst, Register src)
void IncrementalMarkingRecordWriteHelper(Register object, Register value, Register address)
void JumpIfNotHeapNumber(Register object, Register heap_number_map, Register scratch, Label *on_not_heap_number)
void RecordWrite(Register object, Register address, Register value, RAStatus ra_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action=EMIT_REMEMBERED_SET, SmiCheck smi_check=INLINE_SMI_CHECK, PointersToHereCheck pointers_to_here_check_for_value=kPointersToHereMaybeInteresting)
void Call(Label *target)
void AdduAndCheckForOverflow(Register dst, Register left, const Operand &right, Register overflow_dst, Register scratch=at)
void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch)
void IsObjectJSObjectType(Register heap_object, Register map, Register scratch, Label *fail)
void InvokePrologue(const ParameterCount &expected, const ParameterCount &actual, Handle< Code > code_constant, Register code_reg, Label *done, bool *definitely_mismatches, InvokeFlag flag, const CallWrapper &call_wrapper)
void AssertIsRoot(Register reg, Heap::RootListIndex index)
void AssertUndefinedOrAllocationSite(Register object, Register scratch)
void CallCFunction(Register function, int num_reg_arguments, int num_double_arguments)
void LoadGlobalFunction(int index, Register function)
static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS)
void AllocateOneByteConsString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
void AssertSmi(Register object)
void Push(Register src1, Register src2, Register src3, Register src4)
void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS)
void LoadNumber(Register object, FPURegister dst, Register heap_number_map, Register scratch, Label *not_number)
void EmitSeqStringSetCharCheck(Register string, Register index, Register value, Register scratch, uint32_t encoding_mask)
void FlushICache(Register address, unsigned instructions)
void LoadNumberAsInt32Double(Register object, DoubleRegister double_dst, Register heap_number_map, Register scratch1, Register scratch2, FPURegister double_scratch, Label *not_int32)
void RecordWriteField(Register object, int offset, Register value, Register scratch, RAStatus ra_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action=EMIT_REMEMBERED_SET, SmiCheck smi_check=INLINE_SMI_CHECK, PointersToHereCheck pointers_to_here_check_for_value=kPointersToHereMaybeInteresting)
MemOperand SafepointRegistersAndDoublesSlot(Register reg)
void JumpIfSmi(Register value, Label *smi_label, Register scratch=at, BranchDelaySlot bd=PROTECT)
void TruncateHeapNumberToI(Register result, Register object)
void Movn(Register rd, Register rs, Register rt)
void Ulw(Register rd, const MemOperand &rs)
void li(Register dst, Handle< Object > value, LiFlags mode=OPTIMIZE_SIZE)
void Movf(Register rd, Register rs, uint16_t cc=0)
void ClampDoubleToUint8(Register result_reg, DoubleRegister input_reg, DoubleRegister temp_double_reg)
void MarkCode(NopMarkerTypes type)
void ThrowUncatchable(Register value)
void RememberedSetHelper(Register object, Register addr, Register scratch, SaveFPRegsMode save_fp, RememberedSetFinalAction and_then)
void PrepareCallCFunction(int num_reg_arguments, int num_double_registers, Register scratch)
void GetBuiltinEntry(Register target, Builtins::JavaScript id)
void TestJSArrayForAllocationMemento(Register receiver_reg, Register scratch_reg, Label *no_memento_found)
void JumpIfJSArrayHasAllocationMemento(Register receiver_reg, Register scratch_reg, Label *memento_found)
void BranchF(Label *target, Label *nan, Condition cc, FPURegister cmp1, FPURegister cmp2, BranchDelaySlot bd=PROTECT)
void Pref(int32_t hint, const MemOperand &rs)
void Round_w_d(FPURegister fd, FPURegister fs)
void JumpIfNotPowerOfTwoOrZero(Register reg, Register scratch, Label *not_power_of_two_or_zero)
void JumpIfNonSmisNotBothSequentialOneByteStrings(Register first, Register second, Register scratch1, Register scratch2, Label *failure)
void JumpIfNotBothSequentialOneByteStrings(Register first, Register second, Register scratch1, Register scratch2, Label *not_flat_one_byte_strings)
void TrySmiTag(Register reg, Label *not_a_smi)
void Push(Register src1, Register src2)
void NumberOfOwnDescriptors(Register dst, Register map)
void MovFromFloatResult(DoubleRegister dst)
void AllocateHeapNumber(Register result, Register scratch1, Register scratch2, Register heap_number_map, Label *gc_required, TaggingMode tagging_mode=TAG_RESULT, MutableMode mode=IMMUTABLE)
void SmiToDoubleFPURegister(Register smi, FPURegister value, Register scratch1)
void HasColor(Register object, Register scratch0, Register scratch1, Label *has_color, int first_bit, int second_bit)
void TryInlineTruncateDoubleToI(Register result, DoubleRegister input, Label *done)
void CompareMapAndBranch(Register obj, Register scratch, Handle< Map > map, Label *early_success, Condition cond, Label *branch_to)
void ObjectToDoubleFPURegister(Register object, FPURegister value, Register scratch1, Register scratch2, Register heap_number_map, Label *not_number, ObjectToDoubleFlags flags=NO_OBJECT_TO_DOUBLE_FLAGS)
void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0, Register scratch1, Label *found)
void AssertNotSmi(Register object)
void Floor_w_d(FPURegister fd, FPURegister fs)
void AdduAndCheckForOverflow(Register dst, Register left, Register right, Register overflow_dst, Register scratch=at)
void Move(Register dst_low, Register dst_high, FPURegister src)
void EmitFPUTruncate(FPURoundingMode rounding_mode, Register result, DoubleRegister double_input, Register scratch, DoubleRegister double_scratch, Register except_flag, CheckForInexactConversion check_inexact=kDontCheckForInexactConversion)
void JumpIfBlack(Register object, Register scratch0, Register scratch1, Label *on_black)
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc, Label *condition_met)
void CheckMap(Register obj, Register scratch, Handle< Map > map, Label *fail, SmiCheckType smi_check_type)
void MultiPopReversedFPU(RegList regs)
void MultiPushReversed(RegList regs)
void CallRuntimeSaveDoubles(Runtime::FunctionId id)
void Push(Handle< Object > handle)
void CopyFields(Register dst, Register src, RegList temps, int field_count)
void Mfhc1(Register rt, FPURegister fs)
static const int kInstanceTypeOffset
Definition: objects.h:6229
static const Function * FunctionForId(FunctionId id)
Definition: runtime.cc:9312
static TypeFeedbackId None()
Definition: utils.h:945
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf map
enable harmony numeric enable harmony object literal extensions Optimize object size
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_BOOL(enable_always_align_csp
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be NULL
#define DCHECK(condition)
Definition: logging.h:205
#define DCHECK_EQ(v1, v2)
Definition: logging.h:206
#define DECLARE_BRANCH_PROTOTYPES(Name)
#define COND_ARGS
InvokeFlag
AllocationFlags
unsigned short uint16_t
Definition: unicode.cc:23
signed short int16_t
Definition: unicode.cc:22
int int32_t
Definition: unicode.cc:24
const int kPointerSize
Definition: globals.h:129
const Register kRootRegister
const int kRtShift
MemOperand ContextOperand(Register context, int index)
bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)
const Register cp
TypeImpl< ZoneTypeConfig > Type
@ kDontCheckForInexactConversion
const intptr_t kSmiSignMask
Definition: globals.h:223
const int kSmiTagSize
Definition: v8.h:5743
const int kRsFieldMask
int ToNumber(Register reg)
const Register sp
const uint32_t kStringTag
Definition: objects.h:544
Handle< T > handle(T *t, Isolate *isolate)
Definition: handles.h:146
MemOperand FieldMemOperand(Register object, int offset)
const int kRsShift
MemOperand CFunctionArgumentOperand(int index)
byte * Address
Definition: globals.h:101
const int kCArgsSlotsSize
const int kHeapObjectTag
Definition: v8.h:5737
const Register no_reg
kFeedbackVectorOffset flag
Definition: objects-inl.h:5418
const int kRtFieldMask
MemOperand GlobalObjectOperand()
const intptr_t kSmiTagMask
Definition: v8.h:5744
const int kOpcodeMask
const int kSaFieldMask
const int kSmiTag
Definition: v8.h:5742
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2=no_reg, Register reg3=no_reg, Register reg4=no_reg, Register reg5=no_reg, Register reg6=no_reg)
const uint32_t kIsNotStringMask
Definition: objects.h:543
@ kPointersToHereAreAlwaysInteresting
const int kSaShift
const int kCArgSlotCount
Debugger support for the V8 JavaScript engine.
Definition: accessors.cc:20
bool is(FPURegister creg) const
bool is(Register reg) const