V8 Project
macro-assembler-mips.cc
Go to the documentation of this file.
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include <limits.h> // For LONG_MIN, LONG_MAX.
6 
7 #include "src/v8.h"
8 
9 #if V8_TARGET_ARCH_MIPS
10 
11 #include "src/base/bits.h"
13 #include "src/bootstrapper.h"
14 #include "src/codegen.h"
15 #include "src/cpu-profiler.h"
16 #include "src/debug.h"
17 #include "src/isolate-inl.h"
18 #include "src/runtime/runtime.h"
19 
20 namespace v8 {
21 namespace internal {
22 
23 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
24  : Assembler(arg_isolate, buffer, size),
25  generating_stub_(false),
26  has_frame_(false) {
27  if (isolate() != NULL) {
28  code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
29  isolate());
30  }
31 }
32 
33 
34 void MacroAssembler::Load(Register dst,
35  const MemOperand& src,
36  Representation r) {
37  DCHECK(!r.IsDouble());
38  if (r.IsInteger8()) {
39  lb(dst, src);
40  } else if (r.IsUInteger8()) {
41  lbu(dst, src);
42  } else if (r.IsInteger16()) {
43  lh(dst, src);
44  } else if (r.IsUInteger16()) {
45  lhu(dst, src);
46  } else {
47  lw(dst, src);
48  }
49 }
50 
51 
52 void MacroAssembler::Store(Register src,
53  const MemOperand& dst,
54  Representation r) {
55  DCHECK(!r.IsDouble());
56  if (r.IsInteger8() || r.IsUInteger8()) {
57  sb(src, dst);
58  } else if (r.IsInteger16() || r.IsUInteger16()) {
59  sh(src, dst);
60  } else {
61  if (r.IsHeapObject()) {
62  AssertNotSmi(src);
63  } else if (r.IsSmi()) {
64  AssertSmi(src);
65  }
66  sw(src, dst);
67  }
68 }
69 
70 
71 void MacroAssembler::LoadRoot(Register destination,
72  Heap::RootListIndex index) {
73  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
74 }
75 
76 
77 void MacroAssembler::LoadRoot(Register destination,
78  Heap::RootListIndex index,
79  Condition cond,
80  Register src1, const Operand& src2) {
81  Branch(2, NegateCondition(cond), src1, src2);
82  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
83 }
84 
85 
86 void MacroAssembler::StoreRoot(Register source,
87  Heap::RootListIndex index) {
88  sw(source, MemOperand(s6, index << kPointerSizeLog2));
89 }
90 
91 
92 void MacroAssembler::StoreRoot(Register source,
93  Heap::RootListIndex index,
94  Condition cond,
95  Register src1, const Operand& src2) {
96  Branch(2, NegateCondition(cond), src1, src2);
97  sw(source, MemOperand(s6, index << kPointerSizeLog2));
98 }
99 
100 
101 // Push and pop all registers that can hold pointers.
102 void MacroAssembler::PushSafepointRegisters() {
103  // Safepoints expect a block of kNumSafepointRegisters values on the
104  // stack, so adjust the stack for unsaved registers.
105  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
106  DCHECK(num_unsaved >= 0);
107  if (num_unsaved > 0) {
108  Subu(sp, sp, Operand(num_unsaved * kPointerSize));
109  }
110  MultiPush(kSafepointSavedRegisters);
111 }
112 
113 
114 void MacroAssembler::PopSafepointRegisters() {
115  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
116  MultiPop(kSafepointSavedRegisters);
117  if (num_unsaved > 0) {
118  Addu(sp, sp, Operand(num_unsaved * kPointerSize));
119  }
120 }
121 
122 
123 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
124  sw(src, SafepointRegisterSlot(dst));
125 }
126 
127 
128 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
129  lw(dst, SafepointRegisterSlot(src));
130 }
131 
132 
133 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
134  // The registers are pushed starting with the highest encoding,
135  // which means that lowest encodings are closest to the stack pointer.
136  return kSafepointRegisterStackIndexMap[reg_code];
137 }
138 
139 
140 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
141  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
142 }
143 
144 
145 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
147  // General purpose registers are pushed last on the stack.
148  int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
149  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
150  return MemOperand(sp, doubles_size + register_offset);
151 }
152 
153 
154 void MacroAssembler::InNewSpace(Register object,
155  Register scratch,
156  Condition cc,
157  Label* branch) {
158  DCHECK(cc == eq || cc == ne);
159  And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
160  Branch(branch, cc, scratch,
161  Operand(ExternalReference::new_space_start(isolate())));
162 }
163 
164 
165 void MacroAssembler::RecordWriteField(
166  Register object,
167  int offset,
168  Register value,
169  Register dst,
170  RAStatus ra_status,
171  SaveFPRegsMode save_fp,
172  RememberedSetAction remembered_set_action,
173  SmiCheck smi_check,
174  PointersToHereCheck pointers_to_here_check_for_value) {
175  DCHECK(!AreAliased(value, dst, t8, object));
176  // First, check if a write barrier is even needed. The tests below
177  // catch stores of Smis.
178  Label done;
179 
180  // Skip barrier if writing a smi.
181  if (smi_check == INLINE_SMI_CHECK) {
182  JumpIfSmi(value, &done);
183  }
184 
185  // Although the object register is tagged, the offset is relative to the start
186  // of the object, so so offset must be a multiple of kPointerSize.
187  DCHECK(IsAligned(offset, kPointerSize));
188 
189  Addu(dst, object, Operand(offset - kHeapObjectTag));
190  if (emit_debug_code()) {
191  Label ok;
192  And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
193  Branch(&ok, eq, t8, Operand(zero_reg));
194  stop("Unaligned cell in write barrier");
195  bind(&ok);
196  }
197 
198  RecordWrite(object,
199  dst,
200  value,
201  ra_status,
202  save_fp,
203  remembered_set_action,
205  pointers_to_here_check_for_value);
206 
207  bind(&done);
208 
209  // Clobber clobbered input registers when running with the debug-code flag
210  // turned on to provoke errors.
211  if (emit_debug_code()) {
212  li(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
213  li(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
214  }
215 }
216 
217 
218 // Will clobber 4 registers: object, map, dst, ip. The
219 // register 'object' contains a heap object pointer.
220 void MacroAssembler::RecordWriteForMap(Register object,
221  Register map,
222  Register dst,
223  RAStatus ra_status,
224  SaveFPRegsMode fp_mode) {
225  if (emit_debug_code()) {
226  DCHECK(!dst.is(at));
227  lw(dst, FieldMemOperand(map, HeapObject::kMapOffset));
228  Check(eq,
229  kWrongAddressOrValuePassedToRecordWrite,
230  dst,
231  Operand(isolate()->factory()->meta_map()));
232  }
233 
234  if (!FLAG_incremental_marking) {
235  return;
236  }
237 
238  if (emit_debug_code()) {
239  lw(at, FieldMemOperand(object, HeapObject::kMapOffset));
240  Check(eq,
241  kWrongAddressOrValuePassedToRecordWrite,
242  map,
243  Operand(at));
244  }
245 
246  Label done;
247 
248  // A single check of the map's pages interesting flag suffices, since it is
249  // only set during incremental collection, and then it's also guaranteed that
250  // the from object's page's interesting flag is also set. This optimization
251  // relies on the fact that maps can never be in new space.
252  CheckPageFlag(map,
253  map, // Used as scratch.
254  MemoryChunk::kPointersToHereAreInterestingMask,
255  eq,
256  &done);
257 
258  Addu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
259  if (emit_debug_code()) {
260  Label ok;
261  And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
262  Branch(&ok, eq, at, Operand(zero_reg));
263  stop("Unaligned cell in write barrier");
264  bind(&ok);
265  }
266 
267  // Record the actual write.
268  if (ra_status == kRAHasNotBeenSaved) {
269  push(ra);
270  }
271  RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
272  fp_mode);
273  CallStub(&stub);
274  if (ra_status == kRAHasNotBeenSaved) {
275  pop(ra);
276  }
277 
278  bind(&done);
279 
280  // Count number of write barriers in generated code.
281  isolate()->counters()->write_barriers_static()->Increment();
282  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, dst);
283 
284  // Clobber clobbered registers when running with the debug-code flag
285  // turned on to provoke errors.
286  if (emit_debug_code()) {
287  li(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
288  li(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
289  }
290 }
291 
292 
293 // Will clobber 4 registers: object, address, scratch, ip. The
294 // register 'object' contains a heap object pointer. The heap object
295 // tag is shifted away.
296 void MacroAssembler::RecordWrite(
297  Register object,
298  Register address,
299  Register value,
300  RAStatus ra_status,
301  SaveFPRegsMode fp_mode,
302  RememberedSetAction remembered_set_action,
303  SmiCheck smi_check,
304  PointersToHereCheck pointers_to_here_check_for_value) {
305  DCHECK(!AreAliased(object, address, value, t8));
306  DCHECK(!AreAliased(object, address, value, t9));
307 
308  if (emit_debug_code()) {
309  lw(at, MemOperand(address));
310  Assert(
311  eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
312  }
313 
314  if (remembered_set_action == OMIT_REMEMBERED_SET &&
315  !FLAG_incremental_marking) {
316  return;
317  }
318 
319  // First, check if a write barrier is even needed. The tests below
320  // catch stores of smis and stores into the young generation.
321  Label done;
322 
323  if (smi_check == INLINE_SMI_CHECK) {
324  DCHECK_EQ(0, kSmiTag);
325  JumpIfSmi(value, &done);
326  }
327 
328  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
329  CheckPageFlag(value,
330  value, // Used as scratch.
331  MemoryChunk::kPointersToHereAreInterestingMask,
332  eq,
333  &done);
334  }
335  CheckPageFlag(object,
336  value, // Used as scratch.
337  MemoryChunk::kPointersFromHereAreInterestingMask,
338  eq,
339  &done);
340 
341  // Record the actual write.
342  if (ra_status == kRAHasNotBeenSaved) {
343  push(ra);
344  }
345  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
346  fp_mode);
347  CallStub(&stub);
348  if (ra_status == kRAHasNotBeenSaved) {
349  pop(ra);
350  }
351 
352  bind(&done);
353 
354  // Count number of write barriers in generated code.
355  isolate()->counters()->write_barriers_static()->Increment();
356  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at,
357  value);
358 
359  // Clobber clobbered registers when running with the debug-code flag
360  // turned on to provoke errors.
361  if (emit_debug_code()) {
362  li(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
363  li(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
364  }
365 }
366 
367 
368 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
369  Register address,
370  Register scratch,
371  SaveFPRegsMode fp_mode,
372  RememberedSetFinalAction and_then) {
373  Label done;
374  if (emit_debug_code()) {
375  Label ok;
376  JumpIfNotInNewSpace(object, scratch, &ok);
377  stop("Remembered set pointer is in new space");
378  bind(&ok);
379  }
380  // Load store buffer top.
381  ExternalReference store_buffer =
382  ExternalReference::store_buffer_top(isolate());
383  li(t8, Operand(store_buffer));
384  lw(scratch, MemOperand(t8));
385  // Store pointer to buffer and increment buffer top.
386  sw(address, MemOperand(scratch));
387  Addu(scratch, scratch, kPointerSize);
388  // Write back new top of buffer.
389  sw(scratch, MemOperand(t8));
390  // Call stub on end of buffer.
391  // Check for end of buffer.
392  And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
393  if (and_then == kFallThroughAtEnd) {
394  Branch(&done, eq, t8, Operand(zero_reg));
395  } else {
396  DCHECK(and_then == kReturnAtEnd);
397  Ret(eq, t8, Operand(zero_reg));
398  }
399  push(ra);
400  StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
401  CallStub(&store_buffer_overflow);
402  pop(ra);
403  bind(&done);
404  if (and_then == kReturnAtEnd) {
405  Ret();
406  }
407 }
408 
409 
410 // -----------------------------------------------------------------------------
411 // Allocation support.
412 
413 
414 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
415  Register scratch,
416  Label* miss) {
417  Label same_contexts;
418 
419  DCHECK(!holder_reg.is(scratch));
420  DCHECK(!holder_reg.is(at));
421  DCHECK(!scratch.is(at));
422 
423  // Load current lexical context from the stack frame.
424  lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
425  // In debug mode, make sure the lexical context is set.
426 #ifdef DEBUG
427  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
428  scratch, Operand(zero_reg));
429 #endif
430 
431  // Load the native context of the current context.
432  int offset =
433  Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
434  lw(scratch, FieldMemOperand(scratch, offset));
435  lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
436 
437  // Check the context is a native context.
438  if (emit_debug_code()) {
439  push(holder_reg); // Temporarily save holder on the stack.
440  // Read the first word and compare to the native_context_map.
441  lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
442  LoadRoot(at, Heap::kNativeContextMapRootIndex);
443  Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
444  holder_reg, Operand(at));
445  pop(holder_reg); // Restore holder.
446  }
447 
448  // Check if both contexts are the same.
449  lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
450  Branch(&same_contexts, eq, scratch, Operand(at));
451 
452  // Check the context is a native context.
453  if (emit_debug_code()) {
454  push(holder_reg); // Temporarily save holder on the stack.
455  mov(holder_reg, at); // Move at to its holding place.
456  LoadRoot(at, Heap::kNullValueRootIndex);
457  Check(ne, kJSGlobalProxyContextShouldNotBeNull,
458  holder_reg, Operand(at));
459 
460  lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
461  LoadRoot(at, Heap::kNativeContextMapRootIndex);
462  Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
463  holder_reg, Operand(at));
464  // Restore at is not needed. at is reloaded below.
465  pop(holder_reg); // Restore holder.
466  // Restore at to holder's context.
467  lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
468  }
469 
470  // Check that the security token in the calling global object is
471  // compatible with the security token in the receiving global
472  // object.
473  int token_offset = Context::kHeaderSize +
474  Context::SECURITY_TOKEN_INDEX * kPointerSize;
475 
476  lw(scratch, FieldMemOperand(scratch, token_offset));
477  lw(at, FieldMemOperand(at, token_offset));
478  Branch(miss, ne, scratch, Operand(at));
479 
480  bind(&same_contexts);
481 }
482 
483 
484 // Compute the hash code from the untagged key. This must be kept in sync with
485 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
486 // code-stub-hydrogen.cc
487 void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
488  // First of all we assign the hash seed to scratch.
489  LoadRoot(scratch, Heap::kHashSeedRootIndex);
490  SmiUntag(scratch);
491 
492  // Xor original key with a seed.
493  xor_(reg0, reg0, scratch);
494 
495  // Compute the hash code from the untagged key. This must be kept in sync
496  // with ComputeIntegerHash in utils.h.
497  //
498  // hash = ~hash + (hash << 15);
499  nor(scratch, reg0, zero_reg);
500  sll(at, reg0, 15);
501  addu(reg0, scratch, at);
502 
503  // hash = hash ^ (hash >> 12);
504  srl(at, reg0, 12);
505  xor_(reg0, reg0, at);
506 
507  // hash = hash + (hash << 2);
508  sll(at, reg0, 2);
509  addu(reg0, reg0, at);
510 
511  // hash = hash ^ (hash >> 4);
512  srl(at, reg0, 4);
513  xor_(reg0, reg0, at);
514 
515  // hash = hash * 2057;
516  sll(scratch, reg0, 11);
517  sll(at, reg0, 3);
518  addu(reg0, reg0, at);
519  addu(reg0, reg0, scratch);
520 
521  // hash = hash ^ (hash >> 16);
522  srl(at, reg0, 16);
523  xor_(reg0, reg0, at);
524 }
525 
526 
527 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
528  Register elements,
529  Register key,
530  Register result,
531  Register reg0,
532  Register reg1,
533  Register reg2) {
534  // Register use:
535  //
536  // elements - holds the slow-case elements of the receiver on entry.
537  // Unchanged unless 'result' is the same register.
538  //
539  // key - holds the smi key on entry.
540  // Unchanged unless 'result' is the same register.
541  //
542  //
543  // result - holds the result on exit if the load succeeded.
544  // Allowed to be the same as 'key' or 'result'.
545  // Unchanged on bailout so 'key' or 'result' can be used
546  // in further computation.
547  //
548  // Scratch registers:
549  //
550  // reg0 - holds the untagged key on entry and holds the hash once computed.
551  //
552  // reg1 - Used to hold the capacity mask of the dictionary.
553  //
554  // reg2 - Used for the index into the dictionary.
555  // at - Temporary (avoid MacroAssembler instructions also using 'at').
556  Label done;
557 
558  GetNumberHash(reg0, reg1);
559 
560  // Compute the capacity mask.
561  lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
562  sra(reg1, reg1, kSmiTagSize);
563  Subu(reg1, reg1, Operand(1));
564 
565  // Generate an unrolled loop that performs a few probes before giving up.
566  for (int i = 0; i < kNumberDictionaryProbes; i++) {
567  // Use reg2 for index calculations and keep the hash intact in reg0.
568  mov(reg2, reg0);
569  // Compute the masked index: (hash + i + i * i) & mask.
570  if (i > 0) {
571  Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
572  }
573  and_(reg2, reg2, reg1);
574 
575  // Scale the index by multiplying by the element size.
576  DCHECK(SeededNumberDictionary::kEntrySize == 3);
577  sll(at, reg2, 1); // 2x.
578  addu(reg2, reg2, at); // reg2 = reg2 * 3.
579 
580  // Check if the key is identical to the name.
581  sll(at, reg2, kPointerSizeLog2);
582  addu(reg2, elements, at);
583 
584  lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
585  if (i != kNumberDictionaryProbes - 1) {
586  Branch(&done, eq, key, Operand(at));
587  } else {
588  Branch(miss, ne, key, Operand(at));
589  }
590  }
591 
592  bind(&done);
593  // Check that the value is a normal property.
594  // reg2: elements + (index * kPointerSize).
595  const int kDetailsOffset =
596  SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
597  lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
598  And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
599  Branch(miss, ne, at, Operand(zero_reg));
600 
601  // Get the value at the masked, scaled index and return.
602  const int kValueOffset =
603  SeededNumberDictionary::kElementsStartOffset + kPointerSize;
604  lw(result, FieldMemOperand(reg2, kValueOffset));
605 }
606 
607 
608 // ---------------------------------------------------------------------------
609 // Instruction macros.
610 
611 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
612  if (rt.is_reg()) {
613  addu(rd, rs, rt.rm());
614  } else {
615  if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
616  addiu(rd, rs, rt.imm32_);
617  } else {
618  // li handles the relocation.
619  DCHECK(!rs.is(at));
620  li(at, rt);
621  addu(rd, rs, at);
622  }
623  }
624 }
625 
626 
627 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
628  if (rt.is_reg()) {
629  subu(rd, rs, rt.rm());
630  } else {
631  if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
632  addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm).
633  } else {
634  // li handles the relocation.
635  DCHECK(!rs.is(at));
636  li(at, rt);
637  subu(rd, rs, at);
638  }
639  }
640 }
641 
642 
643 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
644  if (rt.is_reg()) {
646  mult(rs, rt.rm());
647  mflo(rd);
648  } else {
649  mul(rd, rs, rt.rm());
650  }
651  } else {
652  // li handles the relocation.
653  DCHECK(!rs.is(at));
654  li(at, rt);
656  mult(rs, at);
657  mflo(rd);
658  } else {
659  mul(rd, rs, at);
660  }
661  }
662 }
663 
664 
665 void MacroAssembler::Mul(Register rd_hi, Register rd_lo,
666  Register rs, const Operand& rt) {
667  if (rt.is_reg()) {
669  mult(rs, rt.rm());
670  mflo(rd_lo);
671  mfhi(rd_hi);
672  } else {
673  if (rd_lo.is(rs)) {
674  DCHECK(!rd_hi.is(rs));
675  DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
676  muh(rd_hi, rs, rt.rm());
677  mul(rd_lo, rs, rt.rm());
678  } else {
679  DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
680  mul(rd_lo, rs, rt.rm());
681  muh(rd_hi, rs, rt.rm());
682  }
683  }
684  } else {
685  // li handles the relocation.
686  DCHECK(!rs.is(at));
687  li(at, rt);
689  mult(rs, at);
690  mflo(rd_lo);
691  mfhi(rd_hi);
692  } else {
693  if (rd_lo.is(rs)) {
694  DCHECK(!rd_hi.is(rs));
695  DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
696  muh(rd_hi, rs, at);
697  mul(rd_lo, rs, at);
698  } else {
699  DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
700  mul(rd_lo, rs, at);
701  muh(rd_hi, rs, at);
702  }
703  }
704  }
705 }
706 
707 
708 void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
709  if (rt.is_reg()) {
711  mult(rs, rt.rm());
712  mfhi(rd);
713  } else {
714  muh(rd, rs, rt.rm());
715  }
716  } else {
717  // li handles the relocation.
718  DCHECK(!rs.is(at));
719  li(at, rt);
721  mult(rs, at);
722  mfhi(rd);
723  } else {
724  muh(rd, rs, at);
725  }
726  }
727 }
728 
729 
730 void MacroAssembler::Mult(Register rs, const Operand& rt) {
731  if (rt.is_reg()) {
732  mult(rs, rt.rm());
733  } else {
734  // li handles the relocation.
735  DCHECK(!rs.is(at));
736  li(at, rt);
737  mult(rs, at);
738  }
739 }
740 
741 
742 void MacroAssembler::Multu(Register rs, const Operand& rt) {
743  if (rt.is_reg()) {
744  multu(rs, rt.rm());
745  } else {
746  // li handles the relocation.
747  DCHECK(!rs.is(at));
748  li(at, rt);
749  multu(rs, at);
750  }
751 }
752 
753 
754 void MacroAssembler::Div(Register rs, const Operand& rt) {
755  if (rt.is_reg()) {
756  div(rs, rt.rm());
757  } else {
758  // li handles the relocation.
759  DCHECK(!rs.is(at));
760  li(at, rt);
761  div(rs, at);
762  }
763 }
764 
765 
766 void MacroAssembler::Div(Register rem, Register res,
767  Register rs, const Operand& rt) {
768  if (rt.is_reg()) {
770  div(rs, rt.rm());
771  mflo(res);
772  mfhi(rem);
773  } else {
774  div(res, rs, rt.rm());
775  mod(rem, rs, rt.rm());
776  }
777  } else {
778  // li handles the relocation.
779  DCHECK(!rs.is(at));
780  li(at, rt);
782  div(rs, at);
783  mflo(res);
784  mfhi(rem);
785  } else {
786  div(res, rs, at);
787  mod(rem, rs, at);
788  }
789  }
790 }
791 
792 
793 void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
794  if (rt.is_reg()) {
796  div(rs, rt.rm());
797  mflo(res);
798  } else {
799  div(res, rs, rt.rm());
800  }
801  } else {
802  // li handles the relocation.
803  DCHECK(!rs.is(at));
804  li(at, rt);
806  div(rs, at);
807  mflo(res);
808  } else {
809  div(res, rs, at);
810  }
811  }
812 }
813 
814 
815 void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
816  if (rt.is_reg()) {
818  div(rs, rt.rm());
819  mfhi(rd);
820  } else {
821  mod(rd, rs, rt.rm());
822  }
823  } else {
824  // li handles the relocation.
825  DCHECK(!rs.is(at));
826  li(at, rt);
828  div(rs, at);
829  mfhi(rd);
830  } else {
831  mod(rd, rs, at);
832  }
833  }
834 }
835 
836 
837 void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
838  if (rt.is_reg()) {
840  divu(rs, rt.rm());
841  mfhi(rd);
842  } else {
843  modu(rd, rs, rt.rm());
844  }
845  } else {
846  // li handles the relocation.
847  DCHECK(!rs.is(at));
848  li(at, rt);
850  divu(rs, at);
851  mfhi(rd);
852  } else {
853  modu(rd, rs, at);
854  }
855  }
856 }
857 
858 
859 void MacroAssembler::Divu(Register rs, const Operand& rt) {
860  if (rt.is_reg()) {
861  divu(rs, rt.rm());
862  } else {
863  // li handles the relocation.
864  DCHECK(!rs.is(at));
865  li(at, rt);
866  divu(rs, at);
867  }
868 }
869 
870 
871 void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
872  if (rt.is_reg()) {
874  divu(rs, rt.rm());
875  mflo(res);
876  } else {
877  divu(res, rs, rt.rm());
878  }
879  } else {
880  // li handles the relocation.
881  DCHECK(!rs.is(at));
882  li(at, rt);
884  divu(rs, at);
885  mflo(res);
886  } else {
887  divu(res, rs, at);
888  }
889  }
890 }
891 
892 
893 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
894  if (rt.is_reg()) {
895  and_(rd, rs, rt.rm());
896  } else {
897  if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
898  andi(rd, rs, rt.imm32_);
899  } else {
900  // li handles the relocation.
901  DCHECK(!rs.is(at));
902  li(at, rt);
903  and_(rd, rs, at);
904  }
905  }
906 }
907 
908 
909 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
910  if (rt.is_reg()) {
911  or_(rd, rs, rt.rm());
912  } else {
913  if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
914  ori(rd, rs, rt.imm32_);
915  } else {
916  // li handles the relocation.
917  DCHECK(!rs.is(at));
918  li(at, rt);
919  or_(rd, rs, at);
920  }
921  }
922 }
923 
924 
925 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
926  if (rt.is_reg()) {
927  xor_(rd, rs, rt.rm());
928  } else {
929  if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
930  xori(rd, rs, rt.imm32_);
931  } else {
932  // li handles the relocation.
933  DCHECK(!rs.is(at));
934  li(at, rt);
935  xor_(rd, rs, at);
936  }
937  }
938 }
939 
940 
941 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
942  if (rt.is_reg()) {
943  nor(rd, rs, rt.rm());
944  } else {
945  // li handles the relocation.
946  DCHECK(!rs.is(at));
947  li(at, rt);
948  nor(rd, rs, at);
949  }
950 }
951 
952 
953 void MacroAssembler::Neg(Register rs, const Operand& rt) {
954  DCHECK(rt.is_reg());
955  DCHECK(!at.is(rs));
956  DCHECK(!at.is(rt.rm()));
957  li(at, -1);
958  xor_(rs, rt.rm(), at);
959 }
960 
961 
962 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
963  if (rt.is_reg()) {
964  slt(rd, rs, rt.rm());
965  } else {
966  if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
967  slti(rd, rs, rt.imm32_);
968  } else {
969  // li handles the relocation.
970  DCHECK(!rs.is(at));
971  li(at, rt);
972  slt(rd, rs, at);
973  }
974  }
975 }
976 
977 
978 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
979  if (rt.is_reg()) {
980  sltu(rd, rs, rt.rm());
981  } else {
982  if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
983  sltiu(rd, rs, rt.imm32_);
984  } else {
985  // li handles the relocation.
986  DCHECK(!rs.is(at));
987  li(at, rt);
988  sltu(rd, rs, at);
989  }
990  }
991 }
992 
993 
994 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
996  if (rt.is_reg()) {
997  rotrv(rd, rs, rt.rm());
998  } else {
999  rotr(rd, rs, rt.imm32_);
1000  }
1001  } else {
1002  if (rt.is_reg()) {
1003  subu(at, zero_reg, rt.rm());
1004  sllv(at, rs, at);
1005  srlv(rd, rs, rt.rm());
1006  or_(rd, rd, at);
1007  } else {
1008  if (rt.imm32_ == 0) {
1009  srl(rd, rs, 0);
1010  } else {
1011  srl(at, rs, rt.imm32_);
1012  sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
1013  or_(rd, rd, at);
1014  }
1015  }
1016  }
1017 }
1018 
1019 
1020 void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
1022  lw(zero_reg, rs);
1023  } else {
1024  pref(hint, rs);
1025  }
1026 }
1027 
1028 
1029 // ------------Pseudo-instructions-------------
1030 
1031 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
1032  lwr(rd, rs);
1033  lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
1034 }
1035 
1036 
1037 void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
1038  swr(rd, rs);
1039  swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
1040 }
1041 
1042 
1043 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
1045  if (value->IsSmi()) {
1046  li(dst, Operand(value), mode);
1047  } else {
1048  DCHECK(value->IsHeapObject());
1049  if (isolate()->heap()->InNewSpace(*value)) {
1050  Handle<Cell> cell = isolate()->factory()->NewCell(value);
1051  li(dst, Operand(cell));
1052  lw(dst, FieldMemOperand(dst, Cell::kValueOffset));
1053  } else {
1054  li(dst, Operand(value));
1055  }
1056  }
1057 }
1058 
1059 
1060 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
1061  DCHECK(!j.is_reg());
1062  BlockTrampolinePoolScope block_trampoline_pool(this);
1063  if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
1064  // Normal load of an immediate value which does not need Relocation Info.
1065  if (is_int16(j.imm32_)) {
1066  addiu(rd, zero_reg, j.imm32_);
1067  } else if (!(j.imm32_ & kHiMask)) {
1068  ori(rd, zero_reg, j.imm32_);
1069  } else if (!(j.imm32_ & kImm16Mask)) {
1070  lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
1071  } else {
1072  lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
1073  ori(rd, rd, (j.imm32_ & kImm16Mask));
1074  }
1075  } else {
1076  if (MustUseReg(j.rmode_)) {
1077  RecordRelocInfo(j.rmode_, j.imm32_);
1078  }
1079  // We always need the same number of instructions as we may need to patch
1080  // this code to load another value which may need 2 instructions to load.
1081  lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
1082  ori(rd, rd, (j.imm32_ & kImm16Mask));
1083  }
1084 }
1085 
1086 
1087 void MacroAssembler::MultiPush(RegList regs) {
1088  int16_t num_to_push = NumberOfBitsSet(regs);
1089  int16_t stack_offset = num_to_push * kPointerSize;
1090 
1091  Subu(sp, sp, Operand(stack_offset));
1092  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1093  if ((regs & (1 << i)) != 0) {
1094  stack_offset -= kPointerSize;
1095  sw(ToRegister(i), MemOperand(sp, stack_offset));
1096  }
1097  }
1098 }
1099 
1100 
1101 void MacroAssembler::MultiPushReversed(RegList regs) {
1102  int16_t num_to_push = NumberOfBitsSet(regs);
1103  int16_t stack_offset = num_to_push * kPointerSize;
1104 
1105  Subu(sp, sp, Operand(stack_offset));
1106  for (int16_t i = 0; i < kNumRegisters; i++) {
1107  if ((regs & (1 << i)) != 0) {
1108  stack_offset -= kPointerSize;
1109  sw(ToRegister(i), MemOperand(sp, stack_offset));
1110  }
1111  }
1112 }
1113 
1114 
1115 void MacroAssembler::MultiPop(RegList regs) {
1116  int16_t stack_offset = 0;
1117 
1118  for (int16_t i = 0; i < kNumRegisters; i++) {
1119  if ((regs & (1 << i)) != 0) {
1120  lw(ToRegister(i), MemOperand(sp, stack_offset));
1121  stack_offset += kPointerSize;
1122  }
1123  }
1124  addiu(sp, sp, stack_offset);
1125 }
1126 
1127 
1128 void MacroAssembler::MultiPopReversed(RegList regs) {
1129  int16_t stack_offset = 0;
1130 
1131  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1132  if ((regs & (1 << i)) != 0) {
1133  lw(ToRegister(i), MemOperand(sp, stack_offset));
1134  stack_offset += kPointerSize;
1135  }
1136  }
1137  addiu(sp, sp, stack_offset);
1138 }
1139 
1140 
1141 void MacroAssembler::MultiPushFPU(RegList regs) {
1142  int16_t num_to_push = NumberOfBitsSet(regs);
1143  int16_t stack_offset = num_to_push * kDoubleSize;
1144 
1145  Subu(sp, sp, Operand(stack_offset));
1146  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1147  if ((regs & (1 << i)) != 0) {
1148  stack_offset -= kDoubleSize;
1149  sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1150  }
1151  }
1152 }
1153 
1154 
1155 void MacroAssembler::MultiPushReversedFPU(RegList regs) {
1156  int16_t num_to_push = NumberOfBitsSet(regs);
1157  int16_t stack_offset = num_to_push * kDoubleSize;
1158 
1159  Subu(sp, sp, Operand(stack_offset));
1160  for (int16_t i = 0; i < kNumRegisters; i++) {
1161  if ((regs & (1 << i)) != 0) {
1162  stack_offset -= kDoubleSize;
1163  sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1164  }
1165  }
1166 }
1167 
1168 
1169 void MacroAssembler::MultiPopFPU(RegList regs) {
1170  int16_t stack_offset = 0;
1171 
1172  for (int16_t i = 0; i < kNumRegisters; i++) {
1173  if ((regs & (1 << i)) != 0) {
1174  ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1175  stack_offset += kDoubleSize;
1176  }
1177  }
1178  addiu(sp, sp, stack_offset);
1179 }
1180 
1181 
1182 void MacroAssembler::MultiPopReversedFPU(RegList regs) {
1183  int16_t stack_offset = 0;
1184 
1185  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1186  if ((regs & (1 << i)) != 0) {
1187  ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1188  stack_offset += kDoubleSize;
1189  }
1190  }
1191  addiu(sp, sp, stack_offset);
1192 }
1193 
1194 
1195 void MacroAssembler::FlushICache(Register address, unsigned instructions) {
1196  RegList saved_regs = kJSCallerSaved | ra.bit();
1197  MultiPush(saved_regs);
1198  AllowExternalCallThatCantCauseGC scope(this);
1199 
1200  // Save to a0 in case address == t0.
1201  Move(a0, address);
1202  PrepareCallCFunction(2, t0);
1203 
1204  li(a1, instructions * kInstrSize);
1205  CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
1206  MultiPop(saved_regs);
1207 }
1208 
1209 
1210 void MacroAssembler::Ext(Register rt,
1211  Register rs,
1212  uint16_t pos,
1213  uint16_t size) {
1214  DCHECK(pos < 32);
1215  DCHECK(pos + size < 33);
1216 
1218  ext_(rt, rs, pos, size);
1219  } else {
1220  // Move rs to rt and shift it left then right to get the
1221  // desired bitfield on the right side and zeroes on the left.
1222  int shift_left = 32 - (pos + size);
1223  sll(rt, rs, shift_left); // Acts as a move if shift_left == 0.
1224 
1225  int shift_right = 32 - size;
1226  if (shift_right > 0) {
1227  srl(rt, rt, shift_right);
1228  }
1229  }
1230 }
1231 
1232 
1233 void MacroAssembler::Ins(Register rt,
1234  Register rs,
1235  uint16_t pos,
1236  uint16_t size) {
1237  DCHECK(pos < 32);
1238  DCHECK(pos + size <= 32);
1239  DCHECK(size != 0);
1240 
1242  ins_(rt, rs, pos, size);
1243  } else {
1244  DCHECK(!rt.is(t8) && !rs.is(t8));
1245  Subu(at, zero_reg, Operand(1));
1246  srl(at, at, 32 - size);
1247  and_(t8, rs, at);
1248  sll(t8, t8, pos);
1249  sll(at, at, pos);
1250  nor(at, at, zero_reg);
1251  and_(at, rt, at);
1252  or_(rt, t8, at);
1253  }
1254 }
1255 
1256 
1257 void MacroAssembler::Cvt_d_uw(FPURegister fd,
1258  FPURegister fs,
1259  FPURegister scratch) {
1260  // Move the data from fs to t8.
1261  mfc1(t8, fs);
1262  Cvt_d_uw(fd, t8, scratch);
1263 }
1264 
1265 
1266 void MacroAssembler::Cvt_d_uw(FPURegister fd,
1267  Register rs,
1268  FPURegister scratch) {
1269  // Convert rs to a FP value in fd (and fd + 1).
1270  // We do this by converting rs minus the MSB to avoid sign conversion,
1271  // then adding 2^31 to the result (if needed).
1272 
1273  DCHECK(!fd.is(scratch));
1274  DCHECK(!rs.is(t9));
1275  DCHECK(!rs.is(at));
1276 
1277  // Save rs's MSB to t9.
1278  Ext(t9, rs, 31, 1);
1279  // Remove rs's MSB.
1280  Ext(at, rs, 0, 31);
1281  // Move the result to fd.
1282  mtc1(at, fd);
1283 
1284  // Convert fd to a real FP value.
1285  cvt_d_w(fd, fd);
1286 
1287  Label conversion_done;
1288 
1289  // If rs's MSB was 0, it's done.
1290  // Otherwise we need to add that to the FP register.
1291  Branch(&conversion_done, eq, t9, Operand(zero_reg));
1292 
1293  // Load 2^31 into f20 as its float representation.
1294  li(at, 0x41E00000);
1295  mtc1(zero_reg, scratch);
1296  Mthc1(at, scratch);
1297  // Add it to fd.
1298  add_d(fd, fd, scratch);
1299 
1300  bind(&conversion_done);
1301 }
1302 
1303 
1304 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1305  FPURegister fs,
1306  FPURegister scratch) {
1307  Trunc_uw_d(fs, t8, scratch);
1308  mtc1(t8, fd);
1309 }
1310 
1311 
1312 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
1313  if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1314  Mfhc1(t8, fs);
1315  trunc_w_d(fd, fs);
1316  Mthc1(t8, fs);
1317  } else {
1318  trunc_w_d(fd, fs);
1319  }
1320 }
1321 
1322 
1323 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
1324  if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1325  Mfhc1(t8, fs);
1326  round_w_d(fd, fs);
1327  Mthc1(t8, fs);
1328  } else {
1329  round_w_d(fd, fs);
1330  }
1331 }
1332 
1333 
1334 void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
1335  if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1336  Mfhc1(t8, fs);
1337  floor_w_d(fd, fs);
1338  Mthc1(t8, fs);
1339  } else {
1340  floor_w_d(fd, fs);
1341  }
1342 }
1343 
1344 
1345 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
1346  if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1347  Mfhc1(t8, fs);
1348  ceil_w_d(fd, fs);
1349  Mthc1(t8, fs);
1350  } else {
1351  ceil_w_d(fd, fs);
1352  }
1353 }
1354 
1355 
1356 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1357  Register rs,
1358  FPURegister scratch) {
1359  DCHECK(!fd.is(scratch));
1360  DCHECK(!rs.is(at));
1361 
1362  // Load 2^31 into scratch as its float representation.
1363  li(at, 0x41E00000);
1364  mtc1(zero_reg, scratch);
1365  Mthc1(at, scratch);
1366  // Test if scratch > fd.
1367  // If fd < 2^31 we can convert it normally.
1368  Label simple_convert;
1369  BranchF(&simple_convert, NULL, lt, fd, scratch);
1370 
1371  // First we subtract 2^31 from fd, then trunc it to rs
1372  // and add 2^31 to rs.
1373  sub_d(scratch, fd, scratch);
1374  trunc_w_d(scratch, scratch);
1375  mfc1(rs, scratch);
1376  Or(rs, rs, 1 << 31);
1377 
1378  Label done;
1379  Branch(&done);
1380  // Simple conversion.
1381  bind(&simple_convert);
1382  trunc_w_d(scratch, fd);
1383  mfc1(rs, scratch);
1384 
1385  bind(&done);
1386 }
1387 
1388 
1389 void MacroAssembler::Mthc1(Register rt, FPURegister fs) {
1390  if (IsFp64Mode()) {
1391  mthc1(rt, fs);
1392  } else {
1393  mtc1(rt, fs.high());
1394  }
1395 }
1396 
1397 
1398 void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
1399  if (IsFp64Mode()) {
1400  mfhc1(rt, fs);
1401  } else {
1402  mfc1(rt, fs.high());
1403  }
1404 }
1405 
1406 
1407 void MacroAssembler::BranchF(Label* target,
1408  Label* nan,
1409  Condition cc,
1410  FPURegister cmp1,
1411  FPURegister cmp2,
1412  BranchDelaySlot bd) {
1413  BlockTrampolinePoolScope block_trampoline_pool(this);
1414  if (cc == al) {
1415  Branch(bd, target);
1416  return;
1417  }
1418 
1419  DCHECK(nan || target);
1420  // Check for unordered (NaN) cases.
1421  if (nan) {
1422  if (!IsMipsArchVariant(kMips32r6)) {
1423  c(UN, D, cmp1, cmp2);
1424  bc1t(nan);
1425  } else {
1426  // Use kDoubleCompareReg for comparison result. It has to be unavailable
1427  // to lithium register allocator.
1428  DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
1429  cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
1430  bc1nez(nan, kDoubleCompareReg);
1431  }
1432  }
1433 
1434  if (!IsMipsArchVariant(kMips32r6)) {
1435  if (target) {
1436  // Here NaN cases were either handled by this function or are assumed to
1437  // have been handled by the caller.
1438  switch (cc) {
1439  case lt:
1440  c(OLT, D, cmp1, cmp2);
1441  bc1t(target);
1442  break;
1443  case gt:
1444  c(ULE, D, cmp1, cmp2);
1445  bc1f(target);
1446  break;
1447  case ge:
1448  c(ULT, D, cmp1, cmp2);
1449  bc1f(target);
1450  break;
1451  case le:
1452  c(OLE, D, cmp1, cmp2);
1453  bc1t(target);
1454  break;
1455  case eq:
1456  c(EQ, D, cmp1, cmp2);
1457  bc1t(target);
1458  break;
1459  case ueq:
1460  c(UEQ, D, cmp1, cmp2);
1461  bc1t(target);
1462  break;
1463  case ne:
1464  c(EQ, D, cmp1, cmp2);
1465  bc1f(target);
1466  break;
1467  case nue:
1468  c(UEQ, D, cmp1, cmp2);
1469  bc1f(target);
1470  break;
1471  default:
1472  CHECK(0);
1473  }
1474  }
1475  } else {
1476  if (target) {
1477  // Here NaN cases were either handled by this function or are assumed to
1478  // have been handled by the caller.
1479  // Unsigned conditions are treated as their signed counterpart.
1480  // Use kDoubleCompareReg for comparison result, it is
1481  // valid in fp64 (FR = 1) mode which is implied for mips32r6.
1482  DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
1483  switch (cc) {
1484  case lt:
1485  cmp(OLT, L, kDoubleCompareReg, cmp1, cmp2);
1486  bc1nez(target, kDoubleCompareReg);
1487  break;
1488  case gt:
1489  cmp(ULE, L, kDoubleCompareReg, cmp1, cmp2);
1490  bc1eqz(target, kDoubleCompareReg);
1491  break;
1492  case ge:
1493  cmp(ULT, L, kDoubleCompareReg, cmp1, cmp2);
1494  bc1eqz(target, kDoubleCompareReg);
1495  break;
1496  case le:
1497  cmp(OLE, L, kDoubleCompareReg, cmp1, cmp2);
1498  bc1nez(target, kDoubleCompareReg);
1499  break;
1500  case eq:
1501  cmp(EQ, L, kDoubleCompareReg, cmp1, cmp2);
1502  bc1nez(target, kDoubleCompareReg);
1503  break;
1504  case ueq:
1505  cmp(UEQ, L, kDoubleCompareReg, cmp1, cmp2);
1506  bc1nez(target, kDoubleCompareReg);
1507  break;
1508  case ne:
1509  cmp(EQ, L, kDoubleCompareReg, cmp1, cmp2);
1510  bc1eqz(target, kDoubleCompareReg);
1511  break;
1512  case nue:
1513  cmp(UEQ, L, kDoubleCompareReg, cmp1, cmp2);
1514  bc1eqz(target, kDoubleCompareReg);
1515  break;
1516  default:
1517  CHECK(0);
1518  }
1519  }
1520  }
1521 
1522  if (bd == PROTECT) {
1523  nop();
1524  }
1525 }
1526 
1527 
1528 void MacroAssembler::Move(FPURegister dst, double imm) {
1529  static const DoubleRepresentation minus_zero(-0.0);
1530  static const DoubleRepresentation zero(0.0);
1531  DoubleRepresentation value_rep(imm);
1532  // Handle special values first.
1533  bool force_load = dst.is(kDoubleRegZero);
1534  if (value_rep == zero && !force_load) {
1535  mov_d(dst, kDoubleRegZero);
1536  } else if (value_rep == minus_zero && !force_load) {
1537  neg_d(dst, kDoubleRegZero);
1538  } else {
1539  uint32_t lo, hi;
1540  DoubleAsTwoUInt32(imm, &lo, &hi);
1541  // Move the low part of the double into the lower of the corresponding FPU
1542  // register of FPU register pair.
1543  if (lo != 0) {
1544  li(at, Operand(lo));
1545  mtc1(at, dst);
1546  } else {
1547  mtc1(zero_reg, dst);
1548  }
1549  // Move the high part of the double into the higher of the corresponding FPU
1550  // register of FPU register pair.
1551  if (hi != 0) {
1552  li(at, Operand(hi));
1553  Mthc1(at, dst);
1554  } else {
1555  Mthc1(zero_reg, dst);
1556  }
1557  }
1558 }
1559 
1560 
1561 void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
1563  Label done;
1564  Branch(&done, ne, rt, Operand(zero_reg));
1565  mov(rd, rs);
1566  bind(&done);
1567  } else {
1568  movz(rd, rs, rt);
1569  }
1570 }
1571 
1572 
1573 void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
1575  Label done;
1576  Branch(&done, eq, rt, Operand(zero_reg));
1577  mov(rd, rs);
1578  bind(&done);
1579  } else {
1580  movn(rd, rs, rt);
1581  }
1582 }
1583 
1584 
1585 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
1587  // Tests an FP condition code and then conditionally move rs to rd.
1588  // We do not currently use any FPU cc bit other than bit 0.
1589  DCHECK(cc == 0);
1590  DCHECK(!(rs.is(t8) || rd.is(t8)));
1591  Label done;
1592  Register scratch = t8;
1593  // For testing purposes we need to fetch content of the FCSR register and
1594  // than test its cc (floating point condition code) bit (for cc = 0, it is
1595  // 24. bit of the FCSR).
1596  cfc1(scratch, FCSR);
1597  // For the MIPS I, II and III architectures, the contents of scratch is
1598  // UNPREDICTABLE for the instruction immediately following CFC1.
1599  nop();
1600  srl(scratch, scratch, 16);
1601  andi(scratch, scratch, 0x0080);
1602  Branch(&done, eq, scratch, Operand(zero_reg));
1603  mov(rd, rs);
1604  bind(&done);
1605  } else {
1606  movt(rd, rs, cc);
1607  }
1608 }
1609 
1610 
1611 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
1613  // Tests an FP condition code and then conditionally move rs to rd.
1614  // We do not currently use any FPU cc bit other than bit 0.
1615  DCHECK(cc == 0);
1616  DCHECK(!(rs.is(t8) || rd.is(t8)));
1617  Label done;
1618  Register scratch = t8;
1619  // For testing purposes we need to fetch content of the FCSR register and
1620  // than test its cc (floating point condition code) bit (for cc = 0, it is
1621  // 24. bit of the FCSR).
1622  cfc1(scratch, FCSR);
1623  // For the MIPS I, II and III architectures, the contents of scratch is
1624  // UNPREDICTABLE for the instruction immediately following CFC1.
1625  nop();
1626  srl(scratch, scratch, 16);
1627  andi(scratch, scratch, 0x0080);
1628  Branch(&done, ne, scratch, Operand(zero_reg));
1629  mov(rd, rs);
1630  bind(&done);
1631  } else {
1632  movf(rd, rs, cc);
1633  }
1634 }
1635 
1636 
1637 void MacroAssembler::Clz(Register rd, Register rs) {
1639  DCHECK(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
1640  Register mask = t8;
1641  Register scratch = t9;
1642  Label loop, end;
1643  mov(at, rs);
1644  mov(rd, zero_reg);
1645  lui(mask, 0x8000);
1646  bind(&loop);
1647  and_(scratch, at, mask);
1648  Branch(&end, ne, scratch, Operand(zero_reg));
1649  addiu(rd, rd, 1);
1650  Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
1651  srl(mask, mask, 1);
1652  bind(&end);
1653  } else {
1654  clz(rd, rs);
1655  }
1656 }
1657 
1658 
1659 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
1660  Register result,
1661  DoubleRegister double_input,
1662  Register scratch,
1663  DoubleRegister double_scratch,
1664  Register except_flag,
1665  CheckForInexactConversion check_inexact) {
1666  DCHECK(!result.is(scratch));
1667  DCHECK(!double_input.is(double_scratch));
1668  DCHECK(!except_flag.is(scratch));
1669 
1670  Label done;
1671 
1672  // Clear the except flag (0 = no exception)
1673  mov(except_flag, zero_reg);
1674 
1675  // Test for values that can be exactly represented as a signed 32-bit integer.
1676  cvt_w_d(double_scratch, double_input);
1677  mfc1(result, double_scratch);
1678  cvt_d_w(double_scratch, double_scratch);
1679  BranchF(&done, NULL, eq, double_input, double_scratch);
1680 
1681  int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
1682 
1683  if (check_inexact == kDontCheckForInexactConversion) {
1684  // Ignore inexact exceptions.
1685  except_mask &= ~kFCSRInexactFlagMask;
1686  }
1687 
1688  // Save FCSR.
1689  cfc1(scratch, FCSR);
1690  // Disable FPU exceptions.
1691  ctc1(zero_reg, FCSR);
1692 
1693  // Do operation based on rounding mode.
1694  switch (rounding_mode) {
1695  case kRoundToNearest:
1696  Round_w_d(double_scratch, double_input);
1697  break;
1698  case kRoundToZero:
1699  Trunc_w_d(double_scratch, double_input);
1700  break;
1701  case kRoundToPlusInf:
1702  Ceil_w_d(double_scratch, double_input);
1703  break;
1704  case kRoundToMinusInf:
1705  Floor_w_d(double_scratch, double_input);
1706  break;
1707  } // End of switch-statement.
1708 
1709  // Retrieve FCSR.
1710  cfc1(except_flag, FCSR);
1711  // Restore FCSR.
1712  ctc1(scratch, FCSR);
1713  // Move the converted value into the result register.
1714  mfc1(result, double_scratch);
1715 
1716  // Check for fpu exceptions.
1717  And(except_flag, except_flag, Operand(except_mask));
1718 
1719  bind(&done);
1720 }
1721 
1722 
1723 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
1724  DoubleRegister double_input,
1725  Label* done) {
1726  DoubleRegister single_scratch = kLithiumScratchDouble.low();
1727  Register scratch = at;
1728  Register scratch2 = t9;
1729 
1730  // Clear cumulative exception flags and save the FCSR.
1731  cfc1(scratch2, FCSR);
1732  ctc1(zero_reg, FCSR);
1733  // Try a conversion to a signed integer.
1734  trunc_w_d(single_scratch, double_input);
1735  mfc1(result, single_scratch);
1736  // Retrieve and restore the FCSR.
1737  cfc1(scratch, FCSR);
1738  ctc1(scratch2, FCSR);
1739  // Check for overflow and NaNs.
1740  And(scratch,
1741  scratch,
1743  // If we had no exceptions we are done.
1744  Branch(done, eq, scratch, Operand(zero_reg));
1745 }
1746 
1747 
1748 void MacroAssembler::TruncateDoubleToI(Register result,
1749  DoubleRegister double_input) {
1750  Label done;
1751 
1752  TryInlineTruncateDoubleToI(result, double_input, &done);
1753 
1754  // If we fell through then inline version didn't succeed - call stub instead.
1755  push(ra);
1756  Subu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
1757  sdc1(double_input, MemOperand(sp, 0));
1758 
1759  DoubleToIStub stub(isolate(), sp, result, 0, true, true);
1760  CallStub(&stub);
1761 
1762  Addu(sp, sp, Operand(kDoubleSize));
1763  pop(ra);
1764 
1765  bind(&done);
1766 }
1767 
1768 
1769 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
1770  Label done;
1771  DoubleRegister double_scratch = f12;
1772  DCHECK(!result.is(object));
1773 
1774  ldc1(double_scratch,
1775  MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
1776  TryInlineTruncateDoubleToI(result, double_scratch, &done);
1777 
1778  // If we fell through then inline version didn't succeed - call stub instead.
1779  push(ra);
1780  DoubleToIStub stub(isolate(),
1781  object,
1782  result,
1783  HeapNumber::kValueOffset - kHeapObjectTag,
1784  true,
1785  true);
1786  CallStub(&stub);
1787  pop(ra);
1788 
1789  bind(&done);
1790 }
1791 
1792 
1793 void MacroAssembler::TruncateNumberToI(Register object,
1794  Register result,
1795  Register heap_number_map,
1796  Register scratch,
1797  Label* not_number) {
1798  Label done;
1799  DCHECK(!result.is(object));
1800 
1801  UntagAndJumpIfSmi(result, object, &done);
1802  JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
1803  TruncateHeapNumberToI(result, object);
1804 
1805  bind(&done);
1806 }
1807 
1808 
1809 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
1810  Register src,
1811  int num_least_bits) {
1812  Ext(dst, src, kSmiTagSize, num_least_bits);
1813 }
1814 
1815 
1816 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
1817  Register src,
1818  int num_least_bits) {
1819  And(dst, src, Operand((1 << num_least_bits) - 1));
1820 }
1821 
1822 
1823 // Emulated condtional branches do not emit a nop in the branch delay slot.
1824 //
1825 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
1826 #define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK( \
1827  (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
1828  (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
1829 
1830 
1831 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
1832  BranchShort(offset, bdslot);
1833 }
1834 
1835 
1836 void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
1837  const Operand& rt,
1838  BranchDelaySlot bdslot) {
1839  BranchShort(offset, cond, rs, rt, bdslot);
1840 }
1841 
1842 
1843 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
1844  if (L->is_bound()) {
1845  if (is_near(L)) {
1846  BranchShort(L, bdslot);
1847  } else {
1848  Jr(L, bdslot);
1849  }
1850  } else {
1851  if (is_trampoline_emitted()) {
1852  Jr(L, bdslot);
1853  } else {
1854  BranchShort(L, bdslot);
1855  }
1856  }
1857 }
1858 
1859 
1860 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1861  const Operand& rt,
1862  BranchDelaySlot bdslot) {
1863  if (L->is_bound()) {
1864  if (is_near(L)) {
1865  BranchShort(L, cond, rs, rt, bdslot);
1866  } else {
1867  if (cond != cc_always) {
1868  Label skip;
1869  Condition neg_cond = NegateCondition(cond);
1870  BranchShort(&skip, neg_cond, rs, rt);
1871  Jr(L, bdslot);
1872  bind(&skip);
1873  } else {
1874  Jr(L, bdslot);
1875  }
1876  }
1877  } else {
1878  if (is_trampoline_emitted()) {
1879  if (cond != cc_always) {
1880  Label skip;
1881  Condition neg_cond = NegateCondition(cond);
1882  BranchShort(&skip, neg_cond, rs, rt);
1883  Jr(L, bdslot);
1884  bind(&skip);
1885  } else {
1886  Jr(L, bdslot);
1887  }
1888  } else {
1889  BranchShort(L, cond, rs, rt, bdslot);
1890  }
1891  }
1892 }
1893 
1894 
1895 void MacroAssembler::Branch(Label* L,
1896  Condition cond,
1897  Register rs,
1898  Heap::RootListIndex index,
1899  BranchDelaySlot bdslot) {
1900  LoadRoot(at, index);
1901  Branch(L, cond, rs, Operand(at), bdslot);
1902 }
1903 
1904 
1905 void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
1906  b(offset);
1907 
1908  // Emit a nop in the branch delay slot if required.
1909  if (bdslot == PROTECT)
1910  nop();
1911 }
1912 
1913 
1914 void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
1915  const Operand& rt,
1916  BranchDelaySlot bdslot) {
1917  BRANCH_ARGS_CHECK(cond, rs, rt);
1918  DCHECK(!rs.is(zero_reg));
1919  Register r2 = no_reg;
1920  Register scratch = at;
1921 
1922  if (rt.is_reg()) {
1923  // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
1924  // rt.
1925  BlockTrampolinePoolScope block_trampoline_pool(this);
1926  r2 = rt.rm_;
1927  switch (cond) {
1928  case cc_always:
1929  b(offset);
1930  break;
1931  case eq:
1932  beq(rs, r2, offset);
1933  break;
1934  case ne:
1935  bne(rs, r2, offset);
1936  break;
1937  // Signed comparison.
1938  case greater:
1939  if (r2.is(zero_reg)) {
1940  bgtz(rs, offset);
1941  } else {
1942  slt(scratch, r2, rs);
1943  bne(scratch, zero_reg, offset);
1944  }
1945  break;
1946  case greater_equal:
1947  if (r2.is(zero_reg)) {
1948  bgez(rs, offset);
1949  } else {
1950  slt(scratch, rs, r2);
1951  beq(scratch, zero_reg, offset);
1952  }
1953  break;
1954  case less:
1955  if (r2.is(zero_reg)) {
1956  bltz(rs, offset);
1957  } else {
1958  slt(scratch, rs, r2);
1959  bne(scratch, zero_reg, offset);
1960  }
1961  break;
1962  case less_equal:
1963  if (r2.is(zero_reg)) {
1964  blez(rs, offset);
1965  } else {
1966  slt(scratch, r2, rs);
1967  beq(scratch, zero_reg, offset);
1968  }
1969  break;
1970  // Unsigned comparison.
1971  case Ugreater:
1972  if (r2.is(zero_reg)) {
1973  bne(rs, zero_reg, offset);
1974  } else {
1975  sltu(scratch, r2, rs);
1976  bne(scratch, zero_reg, offset);
1977  }
1978  break;
1979  case Ugreater_equal:
1980  if (r2.is(zero_reg)) {
1981  b(offset);
1982  } else {
1983  sltu(scratch, rs, r2);
1984  beq(scratch, zero_reg, offset);
1985  }
1986  break;
1987  case Uless:
1988  if (r2.is(zero_reg)) {
1989  // No code needs to be emitted.
1990  return;
1991  } else {
1992  sltu(scratch, rs, r2);
1993  bne(scratch, zero_reg, offset);
1994  }
1995  break;
1996  case Uless_equal:
1997  if (r2.is(zero_reg)) {
1998  beq(rs, zero_reg, offset);
1999  } else {
2000  sltu(scratch, r2, rs);
2001  beq(scratch, zero_reg, offset);
2002  }
2003  break;
2004  default:
2005  UNREACHABLE();
2006  }
2007  } else {
2008  // Be careful to always use shifted_branch_offset only just before the
2009  // branch instruction, as the location will be remember for patching the
2010  // target.
2011  BlockTrampolinePoolScope block_trampoline_pool(this);
2012  switch (cond) {
2013  case cc_always:
2014  b(offset);
2015  break;
2016  case eq:
2017  // We don't want any other register but scratch clobbered.
2018  DCHECK(!scratch.is(rs));
2019  r2 = scratch;
2020  li(r2, rt);
2021  beq(rs, r2, offset);
2022  break;
2023  case ne:
2024  // We don't want any other register but scratch clobbered.
2025  DCHECK(!scratch.is(rs));
2026  r2 = scratch;
2027  li(r2, rt);
2028  bne(rs, r2, offset);
2029  break;
2030  // Signed comparison.
2031  case greater:
2032  if (rt.imm32_ == 0) {
2033  bgtz(rs, offset);
2034  } else {
2035  r2 = scratch;
2036  li(r2, rt);
2037  slt(scratch, r2, rs);
2038  bne(scratch, zero_reg, offset);
2039  }
2040  break;
2041  case greater_equal:
2042  if (rt.imm32_ == 0) {
2043  bgez(rs, offset);
2044  } else if (is_int16(rt.imm32_)) {
2045  slti(scratch, rs, rt.imm32_);
2046  beq(scratch, zero_reg, offset);
2047  } else {
2048  r2 = scratch;
2049  li(r2, rt);
2050  slt(scratch, rs, r2);
2051  beq(scratch, zero_reg, offset);
2052  }
2053  break;
2054  case less:
2055  if (rt.imm32_ == 0) {
2056  bltz(rs, offset);
2057  } else if (is_int16(rt.imm32_)) {
2058  slti(scratch, rs, rt.imm32_);
2059  bne(scratch, zero_reg, offset);
2060  } else {
2061  r2 = scratch;
2062  li(r2, rt);
2063  slt(scratch, rs, r2);
2064  bne(scratch, zero_reg, offset);
2065  }
2066  break;
2067  case less_equal:
2068  if (rt.imm32_ == 0) {
2069  blez(rs, offset);
2070  } else {
2071  r2 = scratch;
2072  li(r2, rt);
2073  slt(scratch, r2, rs);
2074  beq(scratch, zero_reg, offset);
2075  }
2076  break;
2077  // Unsigned comparison.
2078  case Ugreater:
2079  if (rt.imm32_ == 0) {
2080  bne(rs, zero_reg, offset);
2081  } else {
2082  r2 = scratch;
2083  li(r2, rt);
2084  sltu(scratch, r2, rs);
2085  bne(scratch, zero_reg, offset);
2086  }
2087  break;
2088  case Ugreater_equal:
2089  if (rt.imm32_ == 0) {
2090  b(offset);
2091  } else if (is_int16(rt.imm32_)) {
2092  sltiu(scratch, rs, rt.imm32_);
2093  beq(scratch, zero_reg, offset);
2094  } else {
2095  r2 = scratch;
2096  li(r2, rt);
2097  sltu(scratch, rs, r2);
2098  beq(scratch, zero_reg, offset);
2099  }
2100  break;
2101  case Uless:
2102  if (rt.imm32_ == 0) {
2103  // No code needs to be emitted.
2104  return;
2105  } else if (is_int16(rt.imm32_)) {
2106  sltiu(scratch, rs, rt.imm32_);
2107  bne(scratch, zero_reg, offset);
2108  } else {
2109  r2 = scratch;
2110  li(r2, rt);
2111  sltu(scratch, rs, r2);
2112  bne(scratch, zero_reg, offset);
2113  }
2114  break;
2115  case Uless_equal:
2116  if (rt.imm32_ == 0) {
2117  beq(rs, zero_reg, offset);
2118  } else {
2119  r2 = scratch;
2120  li(r2, rt);
2121  sltu(scratch, r2, rs);
2122  beq(scratch, zero_reg, offset);
2123  }
2124  break;
2125  default:
2126  UNREACHABLE();
2127  }
2128  }
2129  // Emit a nop in the branch delay slot if required.
2130  if (bdslot == PROTECT)
2131  nop();
2132 }
2133 
2134 
2135 void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
2136  // We use branch_offset as an argument for the branch instructions to be sure
2137  // it is called just before generating the branch instruction, as needed.
2138 
2139  b(shifted_branch_offset(L, false));
2140 
2141  // Emit a nop in the branch delay slot if required.
2142  if (bdslot == PROTECT)
2143  nop();
2144 }
2145 
2146 
2147 void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
2148  const Operand& rt,
2149  BranchDelaySlot bdslot) {
2150  BRANCH_ARGS_CHECK(cond, rs, rt);
2151 
2152  int32_t offset = 0;
2153  Register r2 = no_reg;
2154  Register scratch = at;
2155  if (rt.is_reg()) {
2156  BlockTrampolinePoolScope block_trampoline_pool(this);
2157  r2 = rt.rm_;
2158  // Be careful to always use shifted_branch_offset only just before the
2159  // branch instruction, as the location will be remember for patching the
2160  // target.
2161  switch (cond) {
2162  case cc_always:
2163  offset = shifted_branch_offset(L, false);
2164  b(offset);
2165  break;
2166  case eq:
2167  offset = shifted_branch_offset(L, false);
2168  beq(rs, r2, offset);
2169  break;
2170  case ne:
2171  offset = shifted_branch_offset(L, false);
2172  bne(rs, r2, offset);
2173  break;
2174  // Signed comparison.
2175  case greater:
2176  if (r2.is(zero_reg)) {
2177  offset = shifted_branch_offset(L, false);
2178  bgtz(rs, offset);
2179  } else {
2180  slt(scratch, r2, rs);
2181  offset = shifted_branch_offset(L, false);
2182  bne(scratch, zero_reg, offset);
2183  }
2184  break;
2185  case greater_equal:
2186  if (r2.is(zero_reg)) {
2187  offset = shifted_branch_offset(L, false);
2188  bgez(rs, offset);
2189  } else {
2190  slt(scratch, rs, r2);
2191  offset = shifted_branch_offset(L, false);
2192  beq(scratch, zero_reg, offset);
2193  }
2194  break;
2195  case less:
2196  if (r2.is(zero_reg)) {
2197  offset = shifted_branch_offset(L, false);
2198  bltz(rs, offset);
2199  } else {
2200  slt(scratch, rs, r2);
2201  offset = shifted_branch_offset(L, false);
2202  bne(scratch, zero_reg, offset);
2203  }
2204  break;
2205  case less_equal:
2206  if (r2.is(zero_reg)) {
2207  offset = shifted_branch_offset(L, false);
2208  blez(rs, offset);
2209  } else {
2210  slt(scratch, r2, rs);
2211  offset = shifted_branch_offset(L, false);
2212  beq(scratch, zero_reg, offset);
2213  }
2214  break;
2215  // Unsigned comparison.
2216  case Ugreater:
2217  if (r2.is(zero_reg)) {
2218  offset = shifted_branch_offset(L, false);
2219  bne(rs, zero_reg, offset);
2220  } else {
2221  sltu(scratch, r2, rs);
2222  offset = shifted_branch_offset(L, false);
2223  bne(scratch, zero_reg, offset);
2224  }
2225  break;
2226  case Ugreater_equal:
2227  if (r2.is(zero_reg)) {
2228  offset = shifted_branch_offset(L, false);
2229  b(offset);
2230  } else {
2231  sltu(scratch, rs, r2);
2232  offset = shifted_branch_offset(L, false);
2233  beq(scratch, zero_reg, offset);
2234  }
2235  break;
2236  case Uless:
2237  if (r2.is(zero_reg)) {
2238  // No code needs to be emitted.
2239  return;
2240  } else {
2241  sltu(scratch, rs, r2);
2242  offset = shifted_branch_offset(L, false);
2243  bne(scratch, zero_reg, offset);
2244  }
2245  break;
2246  case Uless_equal:
2247  if (r2.is(zero_reg)) {
2248  offset = shifted_branch_offset(L, false);
2249  beq(rs, zero_reg, offset);
2250  } else {
2251  sltu(scratch, r2, rs);
2252  offset = shifted_branch_offset(L, false);
2253  beq(scratch, zero_reg, offset);
2254  }
2255  break;
2256  default:
2257  UNREACHABLE();
2258  }
2259  } else {
2260  // Be careful to always use shifted_branch_offset only just before the
2261  // branch instruction, as the location will be remember for patching the
2262  // target.
2263  BlockTrampolinePoolScope block_trampoline_pool(this);
2264  switch (cond) {
2265  case cc_always:
2266  offset = shifted_branch_offset(L, false);
2267  b(offset);
2268  break;
2269  case eq:
2270  DCHECK(!scratch.is(rs));
2271  r2 = scratch;
2272  li(r2, rt);
2273  offset = shifted_branch_offset(L, false);
2274  beq(rs, r2, offset);
2275  break;
2276  case ne:
2277  DCHECK(!scratch.is(rs));
2278  r2 = scratch;
2279  li(r2, rt);
2280  offset = shifted_branch_offset(L, false);
2281  bne(rs, r2, offset);
2282  break;
2283  // Signed comparison.
2284  case greater:
2285  if (rt.imm32_ == 0) {
2286  offset = shifted_branch_offset(L, false);
2287  bgtz(rs, offset);
2288  } else {
2289  DCHECK(!scratch.is(rs));
2290  r2 = scratch;
2291  li(r2, rt);
2292  slt(scratch, r2, rs);
2293  offset = shifted_branch_offset(L, false);
2294  bne(scratch, zero_reg, offset);
2295  }
2296  break;
2297  case greater_equal:
2298  if (rt.imm32_ == 0) {
2299  offset = shifted_branch_offset(L, false);
2300  bgez(rs, offset);
2301  } else if (is_int16(rt.imm32_)) {
2302  slti(scratch, rs, rt.imm32_);
2303  offset = shifted_branch_offset(L, false);
2304  beq(scratch, zero_reg, offset);
2305  } else {
2306  DCHECK(!scratch.is(rs));
2307  r2 = scratch;
2308  li(r2, rt);
2309  slt(scratch, rs, r2);
2310  offset = shifted_branch_offset(L, false);
2311  beq(scratch, zero_reg, offset);
2312  }
2313  break;
2314  case less:
2315  if (rt.imm32_ == 0) {
2316  offset = shifted_branch_offset(L, false);
2317  bltz(rs, offset);
2318  } else if (is_int16(rt.imm32_)) {
2319  slti(scratch, rs, rt.imm32_);
2320  offset = shifted_branch_offset(L, false);
2321  bne(scratch, zero_reg, offset);
2322  } else {
2323  DCHECK(!scratch.is(rs));
2324  r2 = scratch;
2325  li(r2, rt);
2326  slt(scratch, rs, r2);
2327  offset = shifted_branch_offset(L, false);
2328  bne(scratch, zero_reg, offset);
2329  }
2330  break;
2331  case less_equal:
2332  if (rt.imm32_ == 0) {
2333  offset = shifted_branch_offset(L, false);
2334  blez(rs, offset);
2335  } else {
2336  DCHECK(!scratch.is(rs));
2337  r2 = scratch;
2338  li(r2, rt);
2339  slt(scratch, r2, rs);
2340  offset = shifted_branch_offset(L, false);
2341  beq(scratch, zero_reg, offset);
2342  }
2343  break;
2344  // Unsigned comparison.
2345  case Ugreater:
2346  if (rt.imm32_ == 0) {
2347  offset = shifted_branch_offset(L, false);
2348  bne(rs, zero_reg, offset);
2349  } else {
2350  DCHECK(!scratch.is(rs));
2351  r2 = scratch;
2352  li(r2, rt);
2353  sltu(scratch, r2, rs);
2354  offset = shifted_branch_offset(L, false);
2355  bne(scratch, zero_reg, offset);
2356  }
2357  break;
2358  case Ugreater_equal:
2359  if (rt.imm32_ == 0) {
2360  offset = shifted_branch_offset(L, false);
2361  b(offset);
2362  } else if (is_int16(rt.imm32_)) {
2363  sltiu(scratch, rs, rt.imm32_);
2364  offset = shifted_branch_offset(L, false);
2365  beq(scratch, zero_reg, offset);
2366  } else {
2367  DCHECK(!scratch.is(rs));
2368  r2 = scratch;
2369  li(r2, rt);
2370  sltu(scratch, rs, r2);
2371  offset = shifted_branch_offset(L, false);
2372  beq(scratch, zero_reg, offset);
2373  }
2374  break;
2375  case Uless:
2376  if (rt.imm32_ == 0) {
2377  // No code needs to be emitted.
2378  return;
2379  } else if (is_int16(rt.imm32_)) {
2380  sltiu(scratch, rs, rt.imm32_);
2381  offset = shifted_branch_offset(L, false);
2382  bne(scratch, zero_reg, offset);
2383  } else {
2384  DCHECK(!scratch.is(rs));
2385  r2 = scratch;
2386  li(r2, rt);
2387  sltu(scratch, rs, r2);
2388  offset = shifted_branch_offset(L, false);
2389  bne(scratch, zero_reg, offset);
2390  }
2391  break;
2392  case Uless_equal:
2393  if (rt.imm32_ == 0) {
2394  offset = shifted_branch_offset(L, false);
2395  beq(rs, zero_reg, offset);
2396  } else {
2397  DCHECK(!scratch.is(rs));
2398  r2 = scratch;
2399  li(r2, rt);
2400  sltu(scratch, r2, rs);
2401  offset = shifted_branch_offset(L, false);
2402  beq(scratch, zero_reg, offset);
2403  }
2404  break;
2405  default:
2406  UNREACHABLE();
2407  }
2408  }
2409  // Check that offset could actually hold on an int16_t.
2410  DCHECK(is_int16(offset));
2411  // Emit a nop in the branch delay slot if required.
2412  if (bdslot == PROTECT)
2413  nop();
2414 }
2415 
2416 
2417 void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
2418  BranchAndLinkShort(offset, bdslot);
2419 }
2420 
2421 
2422 void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
2423  const Operand& rt,
2424  BranchDelaySlot bdslot) {
2425  BranchAndLinkShort(offset, cond, rs, rt, bdslot);
2426 }
2427 
2428 
2429 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
2430  if (L->is_bound()) {
2431  if (is_near(L)) {
2432  BranchAndLinkShort(L, bdslot);
2433  } else {
2434  Jalr(L, bdslot);
2435  }
2436  } else {
2437  if (is_trampoline_emitted()) {
2438  Jalr(L, bdslot);
2439  } else {
2440  BranchAndLinkShort(L, bdslot);
2441  }
2442  }
2443 }
2444 
2445 
2446 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
2447  const Operand& rt,
2448  BranchDelaySlot bdslot) {
2449  if (L->is_bound()) {
2450  if (is_near(L)) {
2451  BranchAndLinkShort(L, cond, rs, rt, bdslot);
2452  } else {
2453  Label skip;
2454  Condition neg_cond = NegateCondition(cond);
2455  BranchShort(&skip, neg_cond, rs, rt);
2456  Jalr(L, bdslot);
2457  bind(&skip);
2458  }
2459  } else {
2460  if (is_trampoline_emitted()) {
2461  Label skip;
2462  Condition neg_cond = NegateCondition(cond);
2463  BranchShort(&skip, neg_cond, rs, rt);
2464  Jalr(L, bdslot);
2465  bind(&skip);
2466  } else {
2467  BranchAndLinkShort(L, cond, rs, rt, bdslot);
2468  }
2469  }
2470 }
2471 
2472 
2473 // We need to use a bgezal or bltzal, but they can't be used directly with the
2474 // slt instructions. We could use sub or add instead but we would miss overflow
2475 // cases, so we keep slt and add an intermediate third instruction.
2476 void MacroAssembler::BranchAndLinkShort(int16_t offset,
2477  BranchDelaySlot bdslot) {
2478  bal(offset);
2479 
2480  // Emit a nop in the branch delay slot if required.
2481  if (bdslot == PROTECT)
2482  nop();
2483 }
2484 
2485 
2486 void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
2487  Register rs, const Operand& rt,
2488  BranchDelaySlot bdslot) {
2489  BRANCH_ARGS_CHECK(cond, rs, rt);
2490  Register r2 = no_reg;
2491  Register scratch = at;
2492 
2493  if (rt.is_reg()) {
2494  r2 = rt.rm_;
2495  } else if (cond != cc_always) {
2496  r2 = scratch;
2497  li(r2, rt);
2498  }
2499 
2500  if (!IsMipsArchVariant(kMips32r6)) {
2501  BlockTrampolinePoolScope block_trampoline_pool(this);
2502  switch (cond) {
2503  case cc_always:
2504  bal(offset);
2505  break;
2506  case eq:
2507  bne(rs, r2, 2);
2508  nop();
2509  bal(offset);
2510  break;
2511  case ne:
2512  beq(rs, r2, 2);
2513  nop();
2514  bal(offset);
2515  break;
2516 
2517  // Signed comparison.
2518  case greater:
2519  slt(scratch, r2, rs);
2520  addiu(scratch, scratch, -1);
2521  bgezal(scratch, offset);
2522  break;
2523  case greater_equal:
2524  slt(scratch, rs, r2);
2525  addiu(scratch, scratch, -1);
2526  bltzal(scratch, offset);
2527  break;
2528  case less:
2529  slt(scratch, rs, r2);
2530  addiu(scratch, scratch, -1);
2531  bgezal(scratch, offset);
2532  break;
2533  case less_equal:
2534  slt(scratch, r2, rs);
2535  addiu(scratch, scratch, -1);
2536  bltzal(scratch, offset);
2537  break;
2538 
2539  // Unsigned comparison.
2540  case Ugreater:
2541  sltu(scratch, r2, rs);
2542  addiu(scratch, scratch, -1);
2543  bgezal(scratch, offset);
2544  break;
2545  case Ugreater_equal:
2546  sltu(scratch, rs, r2);
2547  addiu(scratch, scratch, -1);
2548  bltzal(scratch, offset);
2549  break;
2550  case Uless:
2551  sltu(scratch, rs, r2);
2552  addiu(scratch, scratch, -1);
2553  bgezal(scratch, offset);
2554  break;
2555  case Uless_equal:
2556  sltu(scratch, r2, rs);
2557  addiu(scratch, scratch, -1);
2558  bltzal(scratch, offset);
2559  break;
2560 
2561  default:
2562  UNREACHABLE();
2563  }
2564  } else {
2565  BlockTrampolinePoolScope block_trampoline_pool(this);
2566  switch (cond) {
2567  case cc_always:
2568  bal(offset);
2569  break;
2570  case eq:
2571  bne(rs, r2, 2);
2572  nop();
2573  bal(offset);
2574  break;
2575  case ne:
2576  beq(rs, r2, 2);
2577  nop();
2578  bal(offset);
2579  break;
2580 
2581  // Signed comparison.
2582  case greater:
2583  // rs > rt
2584  slt(scratch, r2, rs);
2585  beq(scratch, zero_reg, 2);
2586  nop();
2587  bal(offset);
2588  break;
2589  case greater_equal:
2590  // rs >= rt
2591  slt(scratch, rs, r2);
2592  bne(scratch, zero_reg, 2);
2593  nop();
2594  bal(offset);
2595  break;
2596  case less:
2597  // rs < r2
2598  slt(scratch, rs, r2);
2599  bne(scratch, zero_reg, 2);
2600  nop();
2601  bal(offset);
2602  break;
2603  case less_equal:
2604  // rs <= r2
2605  slt(scratch, r2, rs);
2606  bne(scratch, zero_reg, 2);
2607  nop();
2608  bal(offset);
2609  break;
2610 
2611 
2612  // Unsigned comparison.
2613  case Ugreater:
2614  // rs > rt
2615  sltu(scratch, r2, rs);
2616  beq(scratch, zero_reg, 2);
2617  nop();
2618  bal(offset);
2619  break;
2620  case Ugreater_equal:
2621  // rs >= rt
2622  sltu(scratch, rs, r2);
2623  bne(scratch, zero_reg, 2);
2624  nop();
2625  bal(offset);
2626  break;
2627  case Uless:
2628  // rs < r2
2629  sltu(scratch, rs, r2);
2630  bne(scratch, zero_reg, 2);
2631  nop();
2632  bal(offset);
2633  break;
2634  case Uless_equal:
2635  // rs <= r2
2636  sltu(scratch, r2, rs);
2637  bne(scratch, zero_reg, 2);
2638  nop();
2639  bal(offset);
2640  break;
2641  default:
2642  UNREACHABLE();
2643  }
2644  }
2645 
2646  // Emit a nop in the branch delay slot if required.
2647  if (bdslot == PROTECT)
2648  nop();
2649 }
2650 
2651 
2652 void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
2653  bal(shifted_branch_offset(L, false));
2654 
2655  // Emit a nop in the branch delay slot if required.
2656  if (bdslot == PROTECT)
2657  nop();
2658 }
2659 
2660 
2661 void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
2662  const Operand& rt,
2663  BranchDelaySlot bdslot) {
2664  BRANCH_ARGS_CHECK(cond, rs, rt);
2665 
2666  int32_t offset = 0;
2667  Register r2 = no_reg;
2668  Register scratch = at;
2669  if (rt.is_reg()) {
2670  r2 = rt.rm_;
2671  } else if (cond != cc_always) {
2672  r2 = scratch;
2673  li(r2, rt);
2674  }
2675 
2676  if (!IsMipsArchVariant(kMips32r6)) {
2677  BlockTrampolinePoolScope block_trampoline_pool(this);
2678  switch (cond) {
2679  case cc_always:
2680  offset = shifted_branch_offset(L, false);
2681  bal(offset);
2682  break;
2683  case eq:
2684  bne(rs, r2, 2);
2685  nop();
2686  offset = shifted_branch_offset(L, false);
2687  bal(offset);
2688  break;
2689  case ne:
2690  beq(rs, r2, 2);
2691  nop();
2692  offset = shifted_branch_offset(L, false);
2693  bal(offset);
2694  break;
2695 
2696  // Signed comparison.
2697  case greater:
2698  slt(scratch, r2, rs);
2699  addiu(scratch, scratch, -1);
2700  offset = shifted_branch_offset(L, false);
2701  bgezal(scratch, offset);
2702  break;
2703  case greater_equal:
2704  slt(scratch, rs, r2);
2705  addiu(scratch, scratch, -1);
2706  offset = shifted_branch_offset(L, false);
2707  bltzal(scratch, offset);
2708  break;
2709  case less:
2710  slt(scratch, rs, r2);
2711  addiu(scratch, scratch, -1);
2712  offset = shifted_branch_offset(L, false);
2713  bgezal(scratch, offset);
2714  break;
2715  case less_equal:
2716  slt(scratch, r2, rs);
2717  addiu(scratch, scratch, -1);
2718  offset = shifted_branch_offset(L, false);
2719  bltzal(scratch, offset);
2720  break;
2721 
2722  // Unsigned comparison.
2723  case Ugreater:
2724  sltu(scratch, r2, rs);
2725  addiu(scratch, scratch, -1);
2726  offset = shifted_branch_offset(L, false);
2727  bgezal(scratch, offset);
2728  break;
2729  case Ugreater_equal:
2730  sltu(scratch, rs, r2);
2731  addiu(scratch, scratch, -1);
2732  offset = shifted_branch_offset(L, false);
2733  bltzal(scratch, offset);
2734  break;
2735  case Uless:
2736  sltu(scratch, rs, r2);
2737  addiu(scratch, scratch, -1);
2738  offset = shifted_branch_offset(L, false);
2739  bgezal(scratch, offset);
2740  break;
2741  case Uless_equal:
2742  sltu(scratch, r2, rs);
2743  addiu(scratch, scratch, -1);
2744  offset = shifted_branch_offset(L, false);
2745  bltzal(scratch, offset);
2746  break;
2747 
2748  default:
2749  UNREACHABLE();
2750  }
2751  } else {
2752  BlockTrampolinePoolScope block_trampoline_pool(this);
2753  switch (cond) {
2754  case cc_always:
2755  offset = shifted_branch_offset(L, false);
2756  bal(offset);
2757  break;
2758  case eq:
2759  bne(rs, r2, 2);
2760  nop();
2761  offset = shifted_branch_offset(L, false);
2762  bal(offset);
2763  break;
2764  case ne:
2765  beq(rs, r2, 2);
2766  nop();
2767  offset = shifted_branch_offset(L, false);
2768  bal(offset);
2769  break;
2770 
2771  // Signed comparison.
2772  case greater:
2773  // rs > rt
2774  slt(scratch, r2, rs);
2775  beq(scratch, zero_reg, 2);
2776  nop();
2777  offset = shifted_branch_offset(L, false);
2778  bal(offset);
2779  break;
2780  case greater_equal:
2781  // rs >= rt
2782  slt(scratch, rs, r2);
2783  bne(scratch, zero_reg, 2);
2784  nop();
2785  offset = shifted_branch_offset(L, false);
2786  bal(offset);
2787  break;
2788  case less:
2789  // rs < r2
2790  slt(scratch, rs, r2);
2791  bne(scratch, zero_reg, 2);
2792  nop();
2793  offset = shifted_branch_offset(L, false);
2794  bal(offset);
2795  break;
2796  case less_equal:
2797  // rs <= r2
2798  slt(scratch, r2, rs);
2799  bne(scratch, zero_reg, 2);
2800  nop();
2801  offset = shifted_branch_offset(L, false);
2802  bal(offset);
2803  break;
2804 
2805 
2806  // Unsigned comparison.
2807  case Ugreater:
2808  // rs > rt
2809  sltu(scratch, r2, rs);
2810  beq(scratch, zero_reg, 2);
2811  nop();
2812  offset = shifted_branch_offset(L, false);
2813  bal(offset);
2814  break;
2815  case Ugreater_equal:
2816  // rs >= rt
2817  sltu(scratch, rs, r2);
2818  bne(scratch, zero_reg, 2);
2819  nop();
2820  offset = shifted_branch_offset(L, false);
2821  bal(offset);
2822  break;
2823  case Uless:
2824  // rs < r2
2825  sltu(scratch, rs, r2);
2826  bne(scratch, zero_reg, 2);
2827  nop();
2828  offset = shifted_branch_offset(L, false);
2829  bal(offset);
2830  break;
2831  case Uless_equal:
2832  // rs <= r2
2833  sltu(scratch, r2, rs);
2834  bne(scratch, zero_reg, 2);
2835  nop();
2836  offset = shifted_branch_offset(L, false);
2837  bal(offset);
2838  break;
2839 
2840  default:
2841  UNREACHABLE();
2842  }
2843  }
2844 
2845  // Check that offset could actually hold on an int16_t.
2846  DCHECK(is_int16(offset));
2847 
2848  // Emit a nop in the branch delay slot if required.
2849  if (bdslot == PROTECT)
2850  nop();
2851 }
2852 
2853 
2854 void MacroAssembler::Jump(Register target,
2855  Condition cond,
2856  Register rs,
2857  const Operand& rt,
2858  BranchDelaySlot bd) {
2859  BlockTrampolinePoolScope block_trampoline_pool(this);
2860  if (cond == cc_always) {
2861  jr(target);
2862  } else {
2863  BRANCH_ARGS_CHECK(cond, rs, rt);
2864  Branch(2, NegateCondition(cond), rs, rt);
2865  jr(target);
2866  }
2867  // Emit a nop in the branch delay slot if required.
2868  if (bd == PROTECT)
2869  nop();
2870 }
2871 
2872 
2873 void MacroAssembler::Jump(intptr_t target,
2874  RelocInfo::Mode rmode,
2875  Condition cond,
2876  Register rs,
2877  const Operand& rt,
2878  BranchDelaySlot bd) {
2879  Label skip;
2880  if (cond != cc_always) {
2881  Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
2882  }
2883  // The first instruction of 'li' may be placed in the delay slot.
2884  // This is not an issue, t9 is expected to be clobbered anyway.
2885  li(t9, Operand(target, rmode));
2886  Jump(t9, al, zero_reg, Operand(zero_reg), bd);
2887  bind(&skip);
2888 }
2889 
2890 
2891 void MacroAssembler::Jump(Address target,
2892  RelocInfo::Mode rmode,
2893  Condition cond,
2894  Register rs,
2895  const Operand& rt,
2896  BranchDelaySlot bd) {
2897  DCHECK(!RelocInfo::IsCodeTarget(rmode));
2898  Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
2899 }
2900 
2901 
2902 void MacroAssembler::Jump(Handle<Code> code,
2903  RelocInfo::Mode rmode,
2904  Condition cond,
2905  Register rs,
2906  const Operand& rt,
2907  BranchDelaySlot bd) {
2908  DCHECK(RelocInfo::IsCodeTarget(rmode));
2909  AllowDeferredHandleDereference embedding_raw_address;
2910  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
2911 }
2912 
2913 
2914 int MacroAssembler::CallSize(Register target,
2915  Condition cond,
2916  Register rs,
2917  const Operand& rt,
2918  BranchDelaySlot bd) {
2919  int size = 0;
2920 
2921  if (cond == cc_always) {
2922  size += 1;
2923  } else {
2924  size += 3;
2925  }
2926 
2927  if (bd == PROTECT)
2928  size += 1;
2929 
2930  return size * kInstrSize;
2931 }
2932 
2933 
2934 // Note: To call gcc-compiled C code on mips, you must call thru t9.
2935 void MacroAssembler::Call(Register target,
2936  Condition cond,
2937  Register rs,
2938  const Operand& rt,
2939  BranchDelaySlot bd) {
2940  BlockTrampolinePoolScope block_trampoline_pool(this);
2941  Label start;
2942  bind(&start);
2943  if (cond == cc_always) {
2944  jalr(target);
2945  } else {
2946  BRANCH_ARGS_CHECK(cond, rs, rt);
2947  Branch(2, NegateCondition(cond), rs, rt);
2948  jalr(target);
2949  }
2950  // Emit a nop in the branch delay slot if required.
2951  if (bd == PROTECT)
2952  nop();
2953 
2954  DCHECK_EQ(CallSize(target, cond, rs, rt, bd),
2955  SizeOfCodeGeneratedSince(&start));
2956 }
2957 
2958 
2959 int MacroAssembler::CallSize(Address target,
2960  RelocInfo::Mode rmode,
2961  Condition cond,
2962  Register rs,
2963  const Operand& rt,
2964  BranchDelaySlot bd) {
2965  int size = CallSize(t9, cond, rs, rt, bd);
2966  return size + 2 * kInstrSize;
2967 }
2968 
2969 
2970 void MacroAssembler::Call(Address target,
2971  RelocInfo::Mode rmode,
2972  Condition cond,
2973  Register rs,
2974  const Operand& rt,
2975  BranchDelaySlot bd) {
2976  BlockTrampolinePoolScope block_trampoline_pool(this);
2977  Label start;
2978  bind(&start);
2979  int32_t target_int = reinterpret_cast<int32_t>(target);
2980  // Must record previous source positions before the
2981  // li() generates a new code target.
2982  positions_recorder()->WriteRecordedPositions();
2983  li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
2984  Call(t9, cond, rs, rt, bd);
2985  DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
2986  SizeOfCodeGeneratedSince(&start));
2987 }
2988 
2989 
2990 int MacroAssembler::CallSize(Handle<Code> code,
2991  RelocInfo::Mode rmode,
2992  TypeFeedbackId ast_id,
2993  Condition cond,
2994  Register rs,
2995  const Operand& rt,
2996  BranchDelaySlot bd) {
2997  AllowDeferredHandleDereference using_raw_address;
2998  return CallSize(reinterpret_cast<Address>(code.location()),
2999  rmode, cond, rs, rt, bd);
3000 }
3001 
3002 
3003 void MacroAssembler::Call(Handle<Code> code,
3004  RelocInfo::Mode rmode,
3005  TypeFeedbackId ast_id,
3006  Condition cond,
3007  Register rs,
3008  const Operand& rt,
3009  BranchDelaySlot bd) {
3010  BlockTrampolinePoolScope block_trampoline_pool(this);
3011  Label start;
3012  bind(&start);
3013  DCHECK(RelocInfo::IsCodeTarget(rmode));
3014  if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
3015  SetRecordedAstId(ast_id);
3016  rmode = RelocInfo::CODE_TARGET_WITH_ID;
3017  }
3018  AllowDeferredHandleDereference embedding_raw_address;
3019  Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
3020  DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
3021  SizeOfCodeGeneratedSince(&start));
3022 }
3023 
3024 
3025 void MacroAssembler::Ret(Condition cond,
3026  Register rs,
3027  const Operand& rt,
3028  BranchDelaySlot bd) {
3029  Jump(ra, cond, rs, rt, bd);
3030 }
3031 
3032 
3033 void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
3034  BlockTrampolinePoolScope block_trampoline_pool(this);
3035 
3036  uint32_t imm28;
3037  imm28 = jump_address(L);
3038  imm28 &= kImm28Mask;
3039  { BlockGrowBufferScope block_buf_growth(this);
3040  // Buffer growth (and relocation) must be blocked for internal references
3041  // until associated instructions are emitted and available to be patched.
3042  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
3043  j(imm28);
3044  }
3045  // Emit a nop in the branch delay slot if required.
3046  if (bdslot == PROTECT)
3047  nop();
3048 }
3049 
3050 
3051 void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
3052  BlockTrampolinePoolScope block_trampoline_pool(this);
3053 
3054  uint32_t imm32;
3055  imm32 = jump_address(L);
3056  { BlockGrowBufferScope block_buf_growth(this);
3057  // Buffer growth (and relocation) must be blocked for internal references
3058  // until associated instructions are emitted and available to be patched.
3059  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
3060  lui(at, (imm32 & kHiMask) >> kLuiShift);
3061  ori(at, at, (imm32 & kImm16Mask));
3062  }
3063  jr(at);
3064 
3065  // Emit a nop in the branch delay slot if required.
3066  if (bdslot == PROTECT)
3067  nop();
3068 }
3069 
3070 
3071 void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
3072  BlockTrampolinePoolScope block_trampoline_pool(this);
3073 
3074  uint32_t imm32;
3075  imm32 = jump_address(L);
3076  { BlockGrowBufferScope block_buf_growth(this);
3077  // Buffer growth (and relocation) must be blocked for internal references
3078  // until associated instructions are emitted and available to be patched.
3079  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
3080  lui(at, (imm32 & kHiMask) >> kLuiShift);
3081  ori(at, at, (imm32 & kImm16Mask));
3082  }
3083  jalr(at);
3084 
3085  // Emit a nop in the branch delay slot if required.
3086  if (bdslot == PROTECT)
3087  nop();
3088 }
3089 
3090 
3091 void MacroAssembler::DropAndRet(int drop) {
3092  Ret(USE_DELAY_SLOT);
3093  addiu(sp, sp, drop * kPointerSize);
3094 }
3095 
3096 void MacroAssembler::DropAndRet(int drop,
3097  Condition cond,
3098  Register r1,
3099  const Operand& r2) {
3100  // Both Drop and Ret need to be conditional.
3101  Label skip;
3102  if (cond != cc_always) {
3103  Branch(&skip, NegateCondition(cond), r1, r2);
3104  }
3105 
3106  Drop(drop);
3107  Ret();
3108 
3109  if (cond != cc_always) {
3110  bind(&skip);
3111  }
3112 }
3113 
3114 
3115 void MacroAssembler::Drop(int count,
3116  Condition cond,
3117  Register reg,
3118  const Operand& op) {
3119  if (count <= 0) {
3120  return;
3121  }
3122 
3123  Label skip;
3124 
3125  if (cond != al) {
3126  Branch(&skip, NegateCondition(cond), reg, op);
3127  }
3128 
3129  addiu(sp, sp, count * kPointerSize);
3130 
3131  if (cond != al) {
3132  bind(&skip);
3133  }
3134 }
3135 
3136 
3137 
3138 void MacroAssembler::Swap(Register reg1,
3139  Register reg2,
3140  Register scratch) {
3141  if (scratch.is(no_reg)) {
3142  Xor(reg1, reg1, Operand(reg2));
3143  Xor(reg2, reg2, Operand(reg1));
3144  Xor(reg1, reg1, Operand(reg2));
3145  } else {
3146  mov(scratch, reg1);
3147  mov(reg1, reg2);
3148  mov(reg2, scratch);
3149  }
3150 }
3151 
3152 
3153 void MacroAssembler::Call(Label* target) {
3154  BranchAndLink(target);
3155 }
3156 
3157 
3158 void MacroAssembler::Push(Handle<Object> handle) {
3159  li(at, Operand(handle));
3160  push(at);
3161 }
3162 
3163 
3164 void MacroAssembler::DebugBreak() {
3165  PrepareCEntryArgs(0);
3166  PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
3167  CEntryStub ces(isolate(), 1);
3168  DCHECK(AllowThisStubCall(&ces));
3169  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
3170 }
3171 
3172 
3173 // ---------------------------------------------------------------------------
3174 // Exception handling.
3175 
3176 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
3177  int handler_index) {
3178  // Adjust this code if not the case.
3179  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
3180  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
3181  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3182  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3183  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3184  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3185 
3186  // For the JSEntry handler, we must preserve a0-a3 and s0.
3187  // t1-t3 are available. We will build up the handler from the bottom by
3188  // pushing on the stack.
3189  // Set up the code object (t1) and the state (t2) for pushing.
3190  unsigned state =
3191  StackHandler::IndexField::encode(handler_index) |
3192  StackHandler::KindField::encode(kind);
3193  li(t1, Operand(CodeObject()), CONSTANT_SIZE);
3194  li(t2, Operand(state));
3195 
3196  // Push the frame pointer, context, state, and code object.
3197  if (kind == StackHandler::JS_ENTRY) {
3198  DCHECK_EQ(Smi::FromInt(0), 0);
3199  // The second zero_reg indicates no context.
3200  // The first zero_reg is the NULL frame pointer.
3201  // The operands are reversed to match the order of MultiPush/Pop.
3202  Push(zero_reg, zero_reg, t2, t1);
3203  } else {
3204  MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit());
3205  }
3206 
3207  // Link the current handler as the next handler.
3208  li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3209  lw(t1, MemOperand(t2));
3210  push(t1);
3211  // Set this new handler as the current one.
3212  sw(sp, MemOperand(t2));
3213 }
3214 
3215 
3216 void MacroAssembler::PopTryHandler() {
3217  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3218  pop(a1);
3219  Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
3220  li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3221  sw(a1, MemOperand(at));
3222 }
3223 
3224 
3225 void MacroAssembler::JumpToHandlerEntry() {
3226  // Compute the handler entry address and jump to it. The handler table is
3227  // a fixed array of (smi-tagged) code offsets.
3228  // v0 = exception, a1 = code object, a2 = state.
3229  lw(a3, FieldMemOperand(a1, Code::kHandlerTableOffset)); // Handler table.
3230  Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
3231  srl(a2, a2, StackHandler::kKindWidth); // Handler index.
3232  sll(a2, a2, kPointerSizeLog2);
3233  Addu(a2, a3, a2);
3234  lw(a2, MemOperand(a2)); // Smi-tagged offset.
3235  Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
3236  sra(t9, a2, kSmiTagSize);
3237  Addu(t9, t9, a1);
3238  Jump(t9); // Jump.
3239 }
3240 
3241 
3242 void MacroAssembler::Throw(Register value) {
3243  // Adjust this code if not the case.
3244  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
3245  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3246  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3247  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3248  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3249  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3250 
3251  // The exception is expected in v0.
3252  Move(v0, value);
3253 
3254  // Drop the stack pointer to the top of the top handler.
3255  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
3256  isolate())));
3257  lw(sp, MemOperand(a3));
3258 
3259  // Restore the next handler.
3260  pop(a2);
3261  sw(a2, MemOperand(a3));
3262 
3263  // Get the code object (a1) and state (a2). Restore the context and frame
3264  // pointer.
3265  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
3266 
3267  // If the handler is a JS frame, restore the context to the frame.
3268  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
3269  // or cp.
3270  Label done;
3271  Branch(&done, eq, cp, Operand(zero_reg));
3272  sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3273  bind(&done);
3274 
3275  JumpToHandlerEntry();
3276 }
3277 
3278 
3279 void MacroAssembler::ThrowUncatchable(Register value) {
3280  // Adjust this code if not the case.
3281  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
3282  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
3283  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3284  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3285  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3286  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3287 
3288  // The exception is expected in v0.
3289  if (!value.is(v0)) {
3290  mov(v0, value);
3291  }
3292  // Drop the stack pointer to the top of the top stack handler.
3293  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3294  lw(sp, MemOperand(a3));
3295 
3296  // Unwind the handlers until the ENTRY handler is found.
3297  Label fetch_next, check_kind;
3298  jmp(&check_kind);
3299  bind(&fetch_next);
3300  lw(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
3301 
3302  bind(&check_kind);
3303  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
3304  lw(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
3305  And(a2, a2, Operand(StackHandler::KindField::kMask));
3306  Branch(&fetch_next, ne, a2, Operand(zero_reg));
3307 
3308  // Set the top handler address to next handler past the top ENTRY handler.
3309  pop(a2);
3310  sw(a2, MemOperand(a3));
3311 
3312  // Get the code object (a1) and state (a2). Clear the context and frame
3313  // pointer (0 was saved in the handler).
3314  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
3315 
3316  JumpToHandlerEntry();
3317 }
3318 
3319 
3320 void MacroAssembler::Allocate(int object_size,
3321  Register result,
3322  Register scratch1,
3323  Register scratch2,
3324  Label* gc_required,
3326  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
3327  if (!FLAG_inline_new) {
3328  if (emit_debug_code()) {
3329  // Trash the registers to simulate an allocation failure.
3330  li(result, 0x7091);
3331  li(scratch1, 0x7191);
3332  li(scratch2, 0x7291);
3333  }
3334  jmp(gc_required);
3335  return;
3336  }
3337 
3338  DCHECK(!result.is(scratch1));
3339  DCHECK(!result.is(scratch2));
3340  DCHECK(!scratch1.is(scratch2));
3341  DCHECK(!scratch1.is(t9));
3342  DCHECK(!scratch2.is(t9));
3343  DCHECK(!result.is(t9));
3344 
3345  // Make object size into bytes.
3346  if ((flags & SIZE_IN_WORDS) != 0) {
3347  object_size *= kPointerSize;
3348  }
3349  DCHECK_EQ(0, object_size & kObjectAlignmentMask);
3350 
3351  // Check relative positions of allocation top and limit addresses.
3352  // ARM adds additional checks to make sure the ldm instruction can be
3353  // used. On MIPS we don't have ldm so we don't need additional checks either.
3354  ExternalReference allocation_top =
3355  AllocationUtils::GetAllocationTopReference(isolate(), flags);
3356  ExternalReference allocation_limit =
3357  AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3358 
3359  intptr_t top =
3360  reinterpret_cast<intptr_t>(allocation_top.address());
3361  intptr_t limit =
3362  reinterpret_cast<intptr_t>(allocation_limit.address());
3363  DCHECK((limit - top) == kPointerSize);
3364 
3365  // Set up allocation top address and object size registers.
3366  Register topaddr = scratch1;
3367  li(topaddr, Operand(allocation_top));
3368 
3369  // This code stores a temporary value in t9.
3370  if ((flags & RESULT_CONTAINS_TOP) == 0) {
3371  // Load allocation top into result and allocation limit into t9.
3372  lw(result, MemOperand(topaddr));
3373  lw(t9, MemOperand(topaddr, kPointerSize));
3374  } else {
3375  if (emit_debug_code()) {
3376  // Assert that result actually contains top on entry. t9 is used
3377  // immediately below so this use of t9 does not cause difference with
3378  // respect to register content between debug and release mode.
3379  lw(t9, MemOperand(topaddr));
3380  Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
3381  }
3382  // Load allocation limit into t9. Result already contains allocation top.
3383  lw(t9, MemOperand(topaddr, limit - top));
3384  }
3385 
3386  if ((flags & DOUBLE_ALIGNMENT) != 0) {
3387  // Align the next allocation. Storing the filler map without checking top is
3388  // safe in new-space because the limit of the heap is aligned there.
3391  And(scratch2, result, Operand(kDoubleAlignmentMask));
3392  Label aligned;
3393  Branch(&aligned, eq, scratch2, Operand(zero_reg));
3394  if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
3395  Branch(gc_required, Ugreater_equal, result, Operand(t9));
3396  }
3397  li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
3398  sw(scratch2, MemOperand(result));
3399  Addu(result, result, Operand(kDoubleSize / 2));
3400  bind(&aligned);
3401  }
3402 
3403  // Calculate new top and bail out if new space is exhausted. Use result
3404  // to calculate the new top.
3405  Addu(scratch2, result, Operand(object_size));
3406  Branch(gc_required, Ugreater, scratch2, Operand(t9));
3407  sw(scratch2, MemOperand(topaddr));
3408 
3409  // Tag object if requested.
3410  if ((flags & TAG_OBJECT) != 0) {
3411  Addu(result, result, Operand(kHeapObjectTag));
3412  }
3413 }
3414 
3415 
3416 void MacroAssembler::Allocate(Register object_size,
3417  Register result,
3418  Register scratch1,
3419  Register scratch2,
3420  Label* gc_required,
3422  if (!FLAG_inline_new) {
3423  if (emit_debug_code()) {
3424  // Trash the registers to simulate an allocation failure.
3425  li(result, 0x7091);
3426  li(scratch1, 0x7191);
3427  li(scratch2, 0x7291);
3428  }
3429  jmp(gc_required);
3430  return;
3431  }
3432 
3433  DCHECK(!result.is(scratch1));
3434  DCHECK(!result.is(scratch2));
3435  DCHECK(!scratch1.is(scratch2));
3436  DCHECK(!object_size.is(t9));
3437  DCHECK(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
3438 
3439  // Check relative positions of allocation top and limit addresses.
3440  // ARM adds additional checks to make sure the ldm instruction can be
3441  // used. On MIPS we don't have ldm so we don't need additional checks either.
3442  ExternalReference allocation_top =
3443  AllocationUtils::GetAllocationTopReference(isolate(), flags);
3444  ExternalReference allocation_limit =
3445  AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3446  intptr_t top =
3447  reinterpret_cast<intptr_t>(allocation_top.address());
3448  intptr_t limit =
3449  reinterpret_cast<intptr_t>(allocation_limit.address());
3450  DCHECK((limit - top) == kPointerSize);
3451 
3452  // Set up allocation top address and object size registers.
3453  Register topaddr = scratch1;
3454  li(topaddr, Operand(allocation_top));
3455 
3456  // This code stores a temporary value in t9.
3457  if ((flags & RESULT_CONTAINS_TOP) == 0) {
3458  // Load allocation top into result and allocation limit into t9.
3459  lw(result, MemOperand(topaddr));
3460  lw(t9, MemOperand(topaddr, kPointerSize));
3461  } else {
3462  if (emit_debug_code()) {
3463  // Assert that result actually contains top on entry. t9 is used
3464  // immediately below so this use of t9 does not cause difference with
3465  // respect to register content between debug and release mode.
3466  lw(t9, MemOperand(topaddr));
3467  Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
3468  }
3469  // Load allocation limit into t9. Result already contains allocation top.
3470  lw(t9, MemOperand(topaddr, limit - top));
3471  }
3472 
3473  if ((flags & DOUBLE_ALIGNMENT) != 0) {
3474  // Align the next allocation. Storing the filler map without checking top is
3475  // safe in new-space because the limit of the heap is aligned there.
3478  And(scratch2, result, Operand(kDoubleAlignmentMask));
3479  Label aligned;
3480  Branch(&aligned, eq, scratch2, Operand(zero_reg));
3481  if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
3482  Branch(gc_required, Ugreater_equal, result, Operand(t9));
3483  }
3484  li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
3485  sw(scratch2, MemOperand(result));
3486  Addu(result, result, Operand(kDoubleSize / 2));
3487  bind(&aligned);
3488  }
3489 
3490  // Calculate new top and bail out if new space is exhausted. Use result
3491  // to calculate the new top. Object size may be in words so a shift is
3492  // required to get the number of bytes.
3493  if ((flags & SIZE_IN_WORDS) != 0) {
3494  sll(scratch2, object_size, kPointerSizeLog2);
3495  Addu(scratch2, result, scratch2);
3496  } else {
3497  Addu(scratch2, result, Operand(object_size));
3498  }
3499  Branch(gc_required, Ugreater, scratch2, Operand(t9));
3500 
3501  // Update allocation top. result temporarily holds the new top.
3502  if (emit_debug_code()) {
3503  And(t9, scratch2, Operand(kObjectAlignmentMask));
3504  Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
3505  }
3506  sw(scratch2, MemOperand(topaddr));
3507 
3508  // Tag object if requested.
3509  if ((flags & TAG_OBJECT) != 0) {
3510  Addu(result, result, Operand(kHeapObjectTag));
3511  }
3512 }
3513 
3514 
3515 void MacroAssembler::UndoAllocationInNewSpace(Register object,
3516  Register scratch) {
3517  ExternalReference new_space_allocation_top =
3518  ExternalReference::new_space_allocation_top_address(isolate());
3519 
3520  // Make sure the object has no tag before resetting top.
3521  And(object, object, Operand(~kHeapObjectTagMask));
3522 #ifdef DEBUG
3523  // Check that the object un-allocated is below the current top.
3524  li(scratch, Operand(new_space_allocation_top));
3525  lw(scratch, MemOperand(scratch));
3526  Check(less, kUndoAllocationOfNonAllocatedMemory,
3527  object, Operand(scratch));
3528 #endif
3529  // Write the address of the object to un-allocate as the current top.
3530  li(scratch, Operand(new_space_allocation_top));
3531  sw(object, MemOperand(scratch));
3532 }
3533 
3534 
3535 void MacroAssembler::AllocateTwoByteString(Register result,
3536  Register length,
3537  Register scratch1,
3538  Register scratch2,
3539  Register scratch3,
3540  Label* gc_required) {
3541  // Calculate the number of bytes needed for the characters in the string while
3542  // observing object alignment.
3543  DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3544  sll(scratch1, length, 1); // Length in bytes, not chars.
3545  addiu(scratch1, scratch1,
3546  kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3547  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3548 
3549  // Allocate two-byte string in new space.
3550  Allocate(scratch1,
3551  result,
3552  scratch2,
3553  scratch3,
3554  gc_required,
3555  TAG_OBJECT);
3556 
3557  // Set the map, length and hash field.
3558  InitializeNewString(result,
3559  length,
3560  Heap::kStringMapRootIndex,
3561  scratch1,
3562  scratch2);
3563 }
3564 
3565 
3566 void MacroAssembler::AllocateOneByteString(Register result, Register length,
3567  Register scratch1, Register scratch2,
3568  Register scratch3,
3569  Label* gc_required) {
3570  // Calculate the number of bytes needed for the characters in the string
3571  // while observing object alignment.
3572  DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3573  DCHECK(kCharSize == 1);
3574  addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3575  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3576 
3577  // Allocate one-byte string in new space.
3578  Allocate(scratch1,
3579  result,
3580  scratch2,
3581  scratch3,
3582  gc_required,
3583  TAG_OBJECT);
3584 
3585  // Set the map, length and hash field.
3586  InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
3587  scratch1, scratch2);
3588 }
3589 
3590 
3591 void MacroAssembler::AllocateTwoByteConsString(Register result,
3592  Register length,
3593  Register scratch1,
3594  Register scratch2,
3595  Label* gc_required) {
3596  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3597  TAG_OBJECT);
3598  InitializeNewString(result,
3599  length,
3600  Heap::kConsStringMapRootIndex,
3601  scratch1,
3602  scratch2);
3603 }
3604 
3605 
3606 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
3607  Register scratch1,
3608  Register scratch2,
3609  Label* gc_required) {
3610  Allocate(ConsString::kSize,
3611  result,
3612  scratch1,
3613  scratch2,
3614  gc_required,
3615  TAG_OBJECT);
3616 
3617  InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
3618  scratch1, scratch2);
3619 }
3620 
3621 
3622 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3623  Register length,
3624  Register scratch1,
3625  Register scratch2,
3626  Label* gc_required) {
3627  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3628  TAG_OBJECT);
3629 
3630  InitializeNewString(result,
3631  length,
3632  Heap::kSlicedStringMapRootIndex,
3633  scratch1,
3634  scratch2);
3635 }
3636 
3637 
3638 void MacroAssembler::AllocateOneByteSlicedString(Register result,
3639  Register length,
3640  Register scratch1,
3641  Register scratch2,
3642  Label* gc_required) {
3643  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3644  TAG_OBJECT);
3645 
3646  InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
3647  scratch1, scratch2);
3648 }
3649 
3650 
3651 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
3652  Label* not_unique_name) {
3654  Label succeed;
3655  And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3656  Branch(&succeed, eq, at, Operand(zero_reg));
3657  Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
3658 
3659  bind(&succeed);
3660 }
3661 
3662 
3663 // Allocates a heap number or jumps to the label if the young space is full and
3664 // a scavenge is needed.
3665 void MacroAssembler::AllocateHeapNumber(Register result,
3666  Register scratch1,
3667  Register scratch2,
3668  Register heap_number_map,
3669  Label* need_gc,
3670  TaggingMode tagging_mode,
3671  MutableMode mode) {
3672  // Allocate an object in the heap for the heap number and tag it as a heap
3673  // object.
3674  Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
3675  tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3676 
3677  Heap::RootListIndex map_index = mode == MUTABLE
3678  ? Heap::kMutableHeapNumberMapRootIndex
3679  : Heap::kHeapNumberMapRootIndex;
3680  AssertIsRoot(heap_number_map, map_index);
3681 
3682  // Store heap number map in the allocated object.
3683  if (tagging_mode == TAG_RESULT) {
3684  sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3685  } else {
3686  sw(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3687  }
3688 }
3689 
3690 
3691 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3692  FPURegister value,
3693  Register scratch1,
3694  Register scratch2,
3695  Label* gc_required) {
3696  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
3697  AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
3698  sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3699 }
3700 
3701 
3702 // Copies a fixed number of fields of heap objects from src to dst.
3703 void MacroAssembler::CopyFields(Register dst,
3704  Register src,
3705  RegList temps,
3706  int field_count) {
3707  DCHECK((temps & dst.bit()) == 0);
3708  DCHECK((temps & src.bit()) == 0);
3709  // Primitive implementation using only one temporary register.
3710 
3711  Register tmp = no_reg;
3712  // Find a temp register in temps list.
3713  for (int i = 0; i < kNumRegisters; i++) {
3714  if ((temps & (1 << i)) != 0) {
3715  tmp.code_ = i;
3716  break;
3717  }
3718  }
3719  DCHECK(!tmp.is(no_reg));
3720 
3721  for (int i = 0; i < field_count; i++) {
3722  lw(tmp, FieldMemOperand(src, i * kPointerSize));
3723  sw(tmp, FieldMemOperand(dst, i * kPointerSize));
3724  }
3725 }
3726 
3727 
3728 void MacroAssembler::CopyBytes(Register src,
3729  Register dst,
3730  Register length,
3731  Register scratch) {
3732  Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3733 
3734  // Align src before copying in word size chunks.
3735  Branch(&byte_loop, le, length, Operand(kPointerSize));
3736  bind(&align_loop_1);
3737  And(scratch, src, kPointerSize - 1);
3738  Branch(&word_loop, eq, scratch, Operand(zero_reg));
3739  lbu(scratch, MemOperand(src));
3740  Addu(src, src, 1);
3741  sb(scratch, MemOperand(dst));
3742  Addu(dst, dst, 1);
3743  Subu(length, length, Operand(1));
3744  Branch(&align_loop_1, ne, length, Operand(zero_reg));
3745 
3746  // Copy bytes in word size chunks.
3747  bind(&word_loop);
3748  if (emit_debug_code()) {
3749  And(scratch, src, kPointerSize - 1);
3750  Assert(eq, kExpectingAlignmentForCopyBytes,
3751  scratch, Operand(zero_reg));
3752  }
3753  Branch(&byte_loop, lt, length, Operand(kPointerSize));
3754  lw(scratch, MemOperand(src));
3755  Addu(src, src, kPointerSize);
3756 
3757  // TODO(kalmard) check if this can be optimized to use sw in most cases.
3758  // Can't use unaligned access - copy byte by byte.
3759  if (kArchEndian == kLittle) {
3760  sb(scratch, MemOperand(dst, 0));
3761  srl(scratch, scratch, 8);
3762  sb(scratch, MemOperand(dst, 1));
3763  srl(scratch, scratch, 8);
3764  sb(scratch, MemOperand(dst, 2));
3765  srl(scratch, scratch, 8);
3766  sb(scratch, MemOperand(dst, 3));
3767  } else {
3768  sb(scratch, MemOperand(dst, 3));
3769  srl(scratch, scratch, 8);
3770  sb(scratch, MemOperand(dst, 2));
3771  srl(scratch, scratch, 8);
3772  sb(scratch, MemOperand(dst, 1));
3773  srl(scratch, scratch, 8);
3774  sb(scratch, MemOperand(dst, 0));
3775  }
3776 
3777  Addu(dst, dst, 4);
3778 
3779  Subu(length, length, Operand(kPointerSize));
3780  Branch(&word_loop);
3781 
3782  // Copy the last bytes if any left.
3783  bind(&byte_loop);
3784  Branch(&done, eq, length, Operand(zero_reg));
3785  bind(&byte_loop_1);
3786  lbu(scratch, MemOperand(src));
3787  Addu(src, src, 1);
3788  sb(scratch, MemOperand(dst));
3789  Addu(dst, dst, 1);
3790  Subu(length, length, Operand(1));
3791  Branch(&byte_loop_1, ne, length, Operand(zero_reg));
3792  bind(&done);
3793 }
3794 
3795 
3796 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3797  Register end_offset,
3798  Register filler) {
3799  Label loop, entry;
3800  Branch(&entry);
3801  bind(&loop);
3802  sw(filler, MemOperand(start_offset));
3803  Addu(start_offset, start_offset, kPointerSize);
3804  bind(&entry);
3805  Branch(&loop, lt, start_offset, Operand(end_offset));
3806 }
3807 
3808 
3809 void MacroAssembler::CheckFastElements(Register map,
3810  Register scratch,
3811  Label* fail) {
3816  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3817  Branch(fail, hi, scratch,
3818  Operand(Map::kMaximumBitField2FastHoleyElementValue));
3819 }
3820 
3821 
3822 void MacroAssembler::CheckFastObjectElements(Register map,
3823  Register scratch,
3824  Label* fail) {
3829  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3830  Branch(fail, ls, scratch,
3831  Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3832  Branch(fail, hi, scratch,
3833  Operand(Map::kMaximumBitField2FastHoleyElementValue));
3834 }
3835 
3836 
3837 void MacroAssembler::CheckFastSmiElements(Register map,
3838  Register scratch,
3839  Label* fail) {
3842  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3843  Branch(fail, hi, scratch,
3844  Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3845 }
3846 
3847 
3848 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3849  Register key_reg,
3850  Register elements_reg,
3851  Register scratch1,
3852  Register scratch2,
3853  Register scratch3,
3854  Label* fail,
3855  int elements_offset) {
3856  Label smi_value, maybe_nan, have_double_value, is_nan, done;
3857  Register mantissa_reg = scratch2;
3858  Register exponent_reg = scratch3;
3859 
3860  // Handle smi values specially.
3861  JumpIfSmi(value_reg, &smi_value);
3862 
3863  // Ensure that the object is a heap number
3864  CheckMap(value_reg,
3865  scratch1,
3866  Heap::kHeapNumberMapRootIndex,
3867  fail,
3869 
3870  // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
3871  // in the exponent.
3872  li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
3873  lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
3874  Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
3875 
3876  lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3877 
3878  bind(&have_double_value);
3879  sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3880  Addu(scratch1, scratch1, elements_reg);
3881  sw(mantissa_reg,
3882  FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
3883  + kHoleNanLower32Offset));
3884  sw(exponent_reg,
3885  FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
3886  + kHoleNanUpper32Offset));
3887  jmp(&done);
3888 
3889  bind(&maybe_nan);
3890  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
3891  // it's an Infinity, and the non-NaN code path applies.
3892  Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
3893  lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3894  Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
3895  bind(&is_nan);
3896  // Load canonical NaN for storing into the double array.
3897  LoadRoot(at, Heap::kNanValueRootIndex);
3898  lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset));
3899  lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset));
3900  jmp(&have_double_value);
3901 
3902  bind(&smi_value);
3903  Addu(scratch1, elements_reg,
3904  Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
3905  elements_offset));
3906  sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3907  Addu(scratch1, scratch1, scratch2);
3908  // scratch1 is now effective address of the double element
3909 
3910  Register untagged_value = elements_reg;
3911  SmiUntag(untagged_value, value_reg);
3912  mtc1(untagged_value, f2);
3913  cvt_d_w(f0, f2);
3914  sdc1(f0, MemOperand(scratch1, 0));
3915  bind(&done);
3916 }
3917 
3918 
3919 void MacroAssembler::CompareMapAndBranch(Register obj,
3920  Register scratch,
3921  Handle<Map> map,
3922  Label* early_success,
3923  Condition cond,
3924  Label* branch_to) {
3925  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3926  CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
3927 }
3928 
3929 
3930 void MacroAssembler::CompareMapAndBranch(Register obj_map,
3931  Handle<Map> map,
3932  Label* early_success,
3933  Condition cond,
3934  Label* branch_to) {
3935  Branch(branch_to, cond, obj_map, Operand(map));
3936 }
3937 
3938 
3939 void MacroAssembler::CheckMap(Register obj,
3940  Register scratch,
3941  Handle<Map> map,
3942  Label* fail,
3943  SmiCheckType smi_check_type) {
3944  if (smi_check_type == DO_SMI_CHECK) {
3945  JumpIfSmi(obj, fail);
3946  }
3947  Label success;
3948  CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
3949  bind(&success);
3950 }
3951 
3952 
3953 void MacroAssembler::DispatchMap(Register obj,
3954  Register scratch,
3955  Handle<Map> map,
3956  Handle<Code> success,
3957  SmiCheckType smi_check_type) {
3958  Label fail;
3959  if (smi_check_type == DO_SMI_CHECK) {
3960  JumpIfSmi(obj, &fail);
3961  }
3962  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3963  Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
3964  bind(&fail);
3965 }
3966 
3967 
3968 void MacroAssembler::CheckMap(Register obj,
3969  Register scratch,
3970  Heap::RootListIndex index,
3971  Label* fail,
3972  SmiCheckType smi_check_type) {
3973  if (smi_check_type == DO_SMI_CHECK) {
3974  JumpIfSmi(obj, fail);
3975  }
3976  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3977  LoadRoot(at, index);
3978  Branch(fail, ne, scratch, Operand(at));
3979 }
3980 
3981 
3982 void MacroAssembler::MovFromFloatResult(DoubleRegister dst) {
3983  if (IsMipsSoftFloatABI) {
3984  if (kArchEndian == kLittle) {
3985  Move(dst, v0, v1);
3986  } else {
3987  Move(dst, v1, v0);
3988  }
3989  } else {
3990  Move(dst, f0); // Reg f0 is o32 ABI FP return value.
3991  }
3992 }
3993 
3994 
3995 void MacroAssembler::MovFromFloatParameter(DoubleRegister dst) {
3996  if (IsMipsSoftFloatABI) {
3997  if (kArchEndian == kLittle) {
3998  Move(dst, a0, a1);
3999  } else {
4000  Move(dst, a1, a0);
4001  }
4002  } else {
4003  Move(dst, f12); // Reg f12 is o32 ABI FP first argument value.
4004  }
4005 }
4006 
4007 
4008 void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
4009  if (!IsMipsSoftFloatABI) {
4010  Move(f12, src);
4011  } else {
4012  if (kArchEndian == kLittle) {
4013  Move(a0, a1, src);
4014  } else {
4015  Move(a1, a0, src);
4016  }
4017  }
4018 }
4019 
4020 
4021 void MacroAssembler::MovToFloatResult(DoubleRegister src) {
4022  if (!IsMipsSoftFloatABI) {
4023  Move(f0, src);
4024  } else {
4025  if (kArchEndian == kLittle) {
4026  Move(v0, v1, src);
4027  } else {
4028  Move(v1, v0, src);
4029  }
4030  }
4031 }
4032 
4033 
4034 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
4035  DoubleRegister src2) {
4036  if (!IsMipsSoftFloatABI) {
4037  if (src2.is(f12)) {
4038  DCHECK(!src1.is(f14));
4039  Move(f14, src2);
4040  Move(f12, src1);
4041  } else {
4042  Move(f12, src1);
4043  Move(f14, src2);
4044  }
4045  } else {
4046  if (kArchEndian == kLittle) {
4047  Move(a0, a1, src1);
4048  Move(a2, a3, src2);
4049  } else {
4050  Move(a1, a0, src1);
4051  Move(a3, a2, src2);
4052  }
4053  }
4054 }
4055 
4056 
4057 // -----------------------------------------------------------------------------
4058 // JavaScript invokes.
4059 
4060 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
4061  const ParameterCount& actual,
4062  Handle<Code> code_constant,
4063  Register code_reg,
4064  Label* done,
4065  bool* definitely_mismatches,
4066  InvokeFlag flag,
4067  const CallWrapper& call_wrapper) {
4068  bool definitely_matches = false;
4069  *definitely_mismatches = false;
4070  Label regular_invoke;
4071 
4072  // Check whether the expected and actual arguments count match. If not,
4073  // setup registers according to contract with ArgumentsAdaptorTrampoline:
4074  // a0: actual arguments count
4075  // a1: function (passed through to callee)
4076  // a2: expected arguments count
4077 
4078  // The code below is made a lot easier because the calling code already sets
4079  // up actual and expected registers according to the contract if values are
4080  // passed in registers.
4081  DCHECK(actual.is_immediate() || actual.reg().is(a0));
4082  DCHECK(expected.is_immediate() || expected.reg().is(a2));
4083  DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
4084 
4085  if (expected.is_immediate()) {
4086  DCHECK(actual.is_immediate());
4087  if (expected.immediate() == actual.immediate()) {
4088  definitely_matches = true;
4089  } else {
4090  li(a0, Operand(actual.immediate()));
4091  const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
4092  if (expected.immediate() == sentinel) {
4093  // Don't worry about adapting arguments for builtins that
4094  // don't want that done. Skip adaption code by making it look
4095  // like we have a match between expected and actual number of
4096  // arguments.
4097  definitely_matches = true;
4098  } else {
4099  *definitely_mismatches = true;
4100  li(a2, Operand(expected.immediate()));
4101  }
4102  }
4103  } else if (actual.is_immediate()) {
4104  Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
4105  li(a0, Operand(actual.immediate()));
4106  } else {
4107  Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
4108  }
4109 
4110  if (!definitely_matches) {
4111  if (!code_constant.is_null()) {
4112  li(a3, Operand(code_constant));
4113  addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
4114  }
4115 
4116  Handle<Code> adaptor =
4117  isolate()->builtins()->ArgumentsAdaptorTrampoline();
4118  if (flag == CALL_FUNCTION) {
4119  call_wrapper.BeforeCall(CallSize(adaptor));
4120  Call(adaptor);
4121  call_wrapper.AfterCall();
4122  if (!*definitely_mismatches) {
4123  Branch(done);
4124  }
4125  } else {
4126  Jump(adaptor, RelocInfo::CODE_TARGET);
4127  }
4128  bind(&regular_invoke);
4129  }
4130 }
4131 
4132 
4133 void MacroAssembler::InvokeCode(Register code,
4134  const ParameterCount& expected,
4135  const ParameterCount& actual,
4136  InvokeFlag flag,
4137  const CallWrapper& call_wrapper) {
4138  // You can't call a function without a valid frame.
4139  DCHECK(flag == JUMP_FUNCTION || has_frame());
4140 
4141  Label done;
4142 
4143  bool definitely_mismatches = false;
4144  InvokePrologue(expected, actual, Handle<Code>::null(), code,
4145  &done, &definitely_mismatches, flag,
4146  call_wrapper);
4147  if (!definitely_mismatches) {
4148  if (flag == CALL_FUNCTION) {
4149  call_wrapper.BeforeCall(CallSize(code));
4150  Call(code);
4151  call_wrapper.AfterCall();
4152  } else {
4154  Jump(code);
4155  }
4156  // Continue here if InvokePrologue does handle the invocation due to
4157  // mismatched parameter counts.
4158  bind(&done);
4159  }
4160 }
4161 
4162 
4163 void MacroAssembler::InvokeFunction(Register function,
4164  const ParameterCount& actual,
4165  InvokeFlag flag,
4166  const CallWrapper& call_wrapper) {
4167  // You can't call a function without a valid frame.
4168  DCHECK(flag == JUMP_FUNCTION || has_frame());
4169 
4170  // Contract with called JS functions requires that function is passed in a1.
4171  DCHECK(function.is(a1));
4172  Register expected_reg = a2;
4173  Register code_reg = a3;
4174 
4175  lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
4176  lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4177  lw(expected_reg,
4178  FieldMemOperand(code_reg,
4179  SharedFunctionInfo::kFormalParameterCountOffset));
4180  sra(expected_reg, expected_reg, kSmiTagSize);
4181  lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4182 
4183  ParameterCount expected(expected_reg);
4184  InvokeCode(code_reg, expected, actual, flag, call_wrapper);
4185 }
4186 
4187 
4188 void MacroAssembler::InvokeFunction(Register function,
4189  const ParameterCount& expected,
4190  const ParameterCount& actual,
4191  InvokeFlag flag,
4192  const CallWrapper& call_wrapper) {
4193  // You can't call a function without a valid frame.
4194  DCHECK(flag == JUMP_FUNCTION || has_frame());
4195 
4196  // Contract with called JS functions requires that function is passed in a1.
4197  DCHECK(function.is(a1));
4198 
4199  // Get the function and setup the context.
4200  lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4201 
4202  // We call indirectly through the code field in the function to
4203  // allow recompilation to take effect without changing any of the
4204  // call sites.
4205  lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4206  InvokeCode(a3, expected, actual, flag, call_wrapper);
4207 }
4208 
4209 
4210 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
4211  const ParameterCount& expected,
4212  const ParameterCount& actual,
4213  InvokeFlag flag,
4214  const CallWrapper& call_wrapper) {
4215  li(a1, function);
4216  InvokeFunction(a1, expected, actual, flag, call_wrapper);
4217 }
4218 
4219 
4220 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
4221  Register map,
4222  Register scratch,
4223  Label* fail) {
4224  lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
4225  IsInstanceJSObjectType(map, scratch, fail);
4226 }
4227 
4228 
4229 void MacroAssembler::IsInstanceJSObjectType(Register map,
4230  Register scratch,
4231  Label* fail) {
4232  lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
4233  Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
4234  Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
4235 }
4236 
4237 
4238 void MacroAssembler::IsObjectJSStringType(Register object,
4239  Register scratch,
4240  Label* fail) {
4241  DCHECK(kNotStringTag != 0);
4242 
4243  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4244  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4245  And(scratch, scratch, Operand(kIsNotStringMask));
4246  Branch(fail, ne, scratch, Operand(zero_reg));
4247 }
4248 
4249 
4250 void MacroAssembler::IsObjectNameType(Register object,
4251  Register scratch,
4252  Label* fail) {
4253  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4254  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4255  Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
4256 }
4257 
4258 
4259 // ---------------------------------------------------------------------------
4260 // Support functions.
4261 
4262 
4263 void MacroAssembler::TryGetFunctionPrototype(Register function,
4264  Register result,
4265  Register scratch,
4266  Label* miss,
4267  bool miss_on_bound_function) {
4268  Label non_instance;
4269  if (miss_on_bound_function) {
4270  // Check that the receiver isn't a smi.
4271  JumpIfSmi(function, miss);
4272 
4273  // Check that the function really is a function. Load map into result reg.
4274  GetObjectType(function, result, scratch);
4275  Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
4276 
4277  lw(scratch,
4278  FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
4279  lw(scratch,
4280  FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
4281  And(scratch, scratch,
4282  Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
4283  Branch(miss, ne, scratch, Operand(zero_reg));
4284 
4285  // Make sure that the function has an instance prototype.
4286  lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
4287  And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
4288  Branch(&non_instance, ne, scratch, Operand(zero_reg));
4289  }
4290 
4291  // Get the prototype or initial map from the function.
4292  lw(result,
4293  FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4294 
4295  // If the prototype or initial map is the hole, don't return it and
4296  // simply miss the cache instead. This will allow us to allocate a
4297  // prototype object on-demand in the runtime system.
4298  LoadRoot(t8, Heap::kTheHoleValueRootIndex);
4299  Branch(miss, eq, result, Operand(t8));
4300 
4301  // If the function does not have an initial map, we're done.
4302  Label done;
4303  GetObjectType(result, scratch, scratch);
4304  Branch(&done, ne, scratch, Operand(MAP_TYPE));
4305 
4306  // Get the prototype from the initial map.
4307  lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
4308 
4309  if (miss_on_bound_function) {
4310  jmp(&done);
4311 
4312  // Non-instance prototype: Fetch prototype from constructor field
4313  // in initial map.
4314  bind(&non_instance);
4315  lw(result, FieldMemOperand(result, Map::kConstructorOffset));
4316  }
4317 
4318  // All done.
4319  bind(&done);
4320 }
4321 
4322 
4323 void MacroAssembler::GetObjectType(Register object,
4324  Register map,
4325  Register type_reg) {
4326  lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
4327  lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
4328 }
4329 
4330 
4331 // -----------------------------------------------------------------------------
4332 // Runtime calls.
4333 
4334 void MacroAssembler::CallStub(CodeStub* stub,
4335  TypeFeedbackId ast_id,
4336  Condition cond,
4337  Register r1,
4338  const Operand& r2,
4339  BranchDelaySlot bd) {
4340  DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
4341  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
4342  cond, r1, r2, bd);
4343 }
4344 
4345 
4346 void MacroAssembler::TailCallStub(CodeStub* stub,
4347  Condition cond,
4348  Register r1,
4349  const Operand& r2,
4350  BranchDelaySlot bd) {
4351  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
4352 }
4353 
4354 
4355 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
4356  return ref0.address() - ref1.address();
4357 }
4358 
4359 
4360 void MacroAssembler::CallApiFunctionAndReturn(
4361  Register function_address,
4362  ExternalReference thunk_ref,
4363  int stack_space,
4364  MemOperand return_value_operand,
4365  MemOperand* context_restore_operand) {
4366  ExternalReference next_address =
4367  ExternalReference::handle_scope_next_address(isolate());
4368  const int kNextOffset = 0;
4369  const int kLimitOffset = AddressOffset(
4370  ExternalReference::handle_scope_limit_address(isolate()),
4371  next_address);
4372  const int kLevelOffset = AddressOffset(
4373  ExternalReference::handle_scope_level_address(isolate()),
4374  next_address);
4375 
4376  DCHECK(function_address.is(a1) || function_address.is(a2));
4377 
4378  Label profiler_disabled;
4379  Label end_profiler_check;
4380  li(t9, Operand(ExternalReference::is_profiling_address(isolate())));
4381  lb(t9, MemOperand(t9, 0));
4382  Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
4383 
4384  // Additional parameter is the address of the actual callback.
4385  li(t9, Operand(thunk_ref));
4386  jmp(&end_profiler_check);
4387 
4388  bind(&profiler_disabled);
4389  mov(t9, function_address);
4390  bind(&end_profiler_check);
4391 
4392  // Allocate HandleScope in callee-save registers.
4393  li(s3, Operand(next_address));
4394  lw(s0, MemOperand(s3, kNextOffset));
4395  lw(s1, MemOperand(s3, kLimitOffset));
4396  lw(s2, MemOperand(s3, kLevelOffset));
4397  Addu(s2, s2, Operand(1));
4398  sw(s2, MemOperand(s3, kLevelOffset));
4399 
4400  if (FLAG_log_timer_events) {
4401  FrameScope frame(this, StackFrame::MANUAL);
4402  PushSafepointRegisters();
4403  PrepareCallCFunction(1, a0);
4404  li(a0, Operand(ExternalReference::isolate_address(isolate())));
4405  CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
4406  PopSafepointRegisters();
4407  }
4408 
4409  // Native call returns to the DirectCEntry stub which redirects to the
4410  // return address pushed on stack (could have moved after GC).
4411  // DirectCEntry stub itself is generated early and never moves.
4412  DirectCEntryStub stub(isolate());
4413  stub.GenerateCall(this, t9);
4414 
4415  if (FLAG_log_timer_events) {
4416  FrameScope frame(this, StackFrame::MANUAL);
4417  PushSafepointRegisters();
4418  PrepareCallCFunction(1, a0);
4419  li(a0, Operand(ExternalReference::isolate_address(isolate())));
4420  CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
4421  PopSafepointRegisters();
4422  }
4423 
4424  Label promote_scheduled_exception;
4425  Label exception_handled;
4426  Label delete_allocated_handles;
4427  Label leave_exit_frame;
4428  Label return_value_loaded;
4429 
4430  // Load value from ReturnValue.
4431  lw(v0, return_value_operand);
4432  bind(&return_value_loaded);
4433 
4434  // No more valid handles (the result handle was the last one). Restore
4435  // previous handle scope.
4436  sw(s0, MemOperand(s3, kNextOffset));
4437  if (emit_debug_code()) {
4438  lw(a1, MemOperand(s3, kLevelOffset));
4439  Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
4440  }
4441  Subu(s2, s2, Operand(1));
4442  sw(s2, MemOperand(s3, kLevelOffset));
4443  lw(at, MemOperand(s3, kLimitOffset));
4444  Branch(&delete_allocated_handles, ne, s1, Operand(at));
4445 
4446  // Check if the function scheduled an exception.
4447  bind(&leave_exit_frame);
4448  LoadRoot(t0, Heap::kTheHoleValueRootIndex);
4449  li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
4450  lw(t1, MemOperand(at));
4451  Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
4452  bind(&exception_handled);
4453 
4454  bool restore_context = context_restore_operand != NULL;
4455  if (restore_context) {
4456  lw(cp, *context_restore_operand);
4457  }
4458  li(s0, Operand(stack_space));
4459  LeaveExitFrame(false, s0, !restore_context, EMIT_RETURN);
4460 
4461  bind(&promote_scheduled_exception);
4462  {
4463  FrameScope frame(this, StackFrame::INTERNAL);
4464  CallExternalReference(
4465  ExternalReference(Runtime::kPromoteScheduledException, isolate()),
4466  0);
4467  }
4468  jmp(&exception_handled);
4469 
4470  // HandleScope limit has changed. Delete allocated extensions.
4471  bind(&delete_allocated_handles);
4472  sw(s1, MemOperand(s3, kLimitOffset));
4473  mov(s0, v0);
4474  mov(a0, v0);
4475  PrepareCallCFunction(1, s1);
4476  li(a0, Operand(ExternalReference::isolate_address(isolate())));
4477  CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
4478  1);
4479  mov(v0, s0);
4480  jmp(&leave_exit_frame);
4481 }
4482 
4483 
4484 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
4485  return has_frame_ || !stub->SometimesSetsUpAFrame();
4486 }
4487 
4488 
4489 void MacroAssembler::IndexFromHash(Register hash, Register index) {
4490  // If the hash field contains an array index pick it out. The assert checks
4491  // that the constants for the maximum number of digits for an array index
4492  // cached in the hash field and the number of bits reserved for it does not
4493  // conflict.
4494  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
4495  (1 << String::kArrayIndexValueBits));
4496  DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
4497 }
4498 
4499 
4500 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
4501  FPURegister result,
4502  Register scratch1,
4503  Register scratch2,
4504  Register heap_number_map,
4505  Label* not_number,
4507  Label done;
4508  if ((flags & OBJECT_NOT_SMI) == 0) {
4509  Label not_smi;
4510  JumpIfNotSmi(object, &not_smi);
4511  // Remove smi tag and convert to double.
4512  sra(scratch1, object, kSmiTagSize);
4513  mtc1(scratch1, result);
4514  cvt_d_w(result, result);
4515  Branch(&done);
4516  bind(&not_smi);
4517  }
4518  // Check for heap number and load double value from it.
4519  lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
4520  Branch(not_number, ne, scratch1, Operand(heap_number_map));
4521 
4522  if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
4523  // If exponent is all ones the number is either a NaN or +/-Infinity.
4524  Register exponent = scratch1;
4525  Register mask_reg = scratch2;
4526  lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
4527  li(mask_reg, HeapNumber::kExponentMask);
4528 
4529  And(exponent, exponent, mask_reg);
4530  Branch(not_number, eq, exponent, Operand(mask_reg));
4531  }
4532  ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
4533  bind(&done);
4534 }
4535 
4536 
4537 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
4538  FPURegister value,
4539  Register scratch1) {
4540  sra(scratch1, smi, kSmiTagSize);
4541  mtc1(scratch1, value);
4542  cvt_d_w(value, value);
4543 }
4544 
4545 
4546 void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
4547  const Operand& right,
4548  Register overflow_dst,
4549  Register scratch) {
4550  if (right.is_reg()) {
4551  AdduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
4552  } else {
4553  if (dst.is(left)) {
4554  mov(scratch, left); // Preserve left.
4555  addiu(dst, left, right.immediate()); // Left is overwritten.
4556  xor_(scratch, dst, scratch); // Original left.
4557  // Load right since xori takes uint16 as immediate.
4558  addiu(t9, zero_reg, right.immediate());
4559  xor_(overflow_dst, dst, t9);
4560  and_(overflow_dst, overflow_dst, scratch);
4561  } else {
4562  addiu(dst, left, right.immediate());
4563  xor_(overflow_dst, dst, left);
4564  // Load right since xori takes uint16 as immediate.
4565  addiu(t9, zero_reg, right.immediate());
4566  xor_(scratch, dst, t9);
4567  and_(overflow_dst, scratch, overflow_dst);
4568  }
4569  }
4570 }
4571 
4572 
4573 void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
4574  Register right,
4575  Register overflow_dst,
4576  Register scratch) {
4577  DCHECK(!dst.is(overflow_dst));
4578  DCHECK(!dst.is(scratch));
4579  DCHECK(!overflow_dst.is(scratch));
4580  DCHECK(!overflow_dst.is(left));
4581  DCHECK(!overflow_dst.is(right));
4582 
4583  if (left.is(right) && dst.is(left)) {
4584  DCHECK(!dst.is(t9));
4585  DCHECK(!scratch.is(t9));
4586  DCHECK(!left.is(t9));
4587  DCHECK(!right.is(t9));
4588  DCHECK(!overflow_dst.is(t9));
4589  mov(t9, right);
4590  right = t9;
4591  }
4592 
4593  if (dst.is(left)) {
4594  mov(scratch, left); // Preserve left.
4595  addu(dst, left, right); // Left is overwritten.
4596  xor_(scratch, dst, scratch); // Original left.
4597  xor_(overflow_dst, dst, right);
4598  and_(overflow_dst, overflow_dst, scratch);
4599  } else if (dst.is(right)) {
4600  mov(scratch, right); // Preserve right.
4601  addu(dst, left, right); // Right is overwritten.
4602  xor_(scratch, dst, scratch); // Original right.
4603  xor_(overflow_dst, dst, left);
4604  and_(overflow_dst, overflow_dst, scratch);
4605  } else {
4606  addu(dst, left, right);
4607  xor_(overflow_dst, dst, left);
4608  xor_(scratch, dst, right);
4609  and_(overflow_dst, scratch, overflow_dst);
4610  }
4611 }
4612 
4613 
4614 void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
4615  const Operand& right,
4616  Register overflow_dst,
4617  Register scratch) {
4618  if (right.is_reg()) {
4619  SubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
4620  } else {
4621  if (dst.is(left)) {
4622  mov(scratch, left); // Preserve left.
4623  addiu(dst, left, -(right.immediate())); // Left is overwritten.
4624  xor_(overflow_dst, dst, scratch); // scratch is original left.
4625  // Load right since xori takes uint16 as immediate.
4626  addiu(t9, zero_reg, right.immediate());
4627  xor_(scratch, scratch, t9); // scratch is original left.
4628  and_(overflow_dst, scratch, overflow_dst);
4629  } else {
4630  addiu(dst, left, -(right.immediate()));
4631  xor_(overflow_dst, dst, left);
4632  // Load right since xori takes uint16 as immediate.
4633  addiu(t9, zero_reg, right.immediate());
4634  xor_(scratch, left, t9);
4635  and_(overflow_dst, scratch, overflow_dst);
4636  }
4637  }
4638 }
4639 
4640 
4641 void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
4642  Register right,
4643  Register overflow_dst,
4644  Register scratch) {
4645  DCHECK(!dst.is(overflow_dst));
4646  DCHECK(!dst.is(scratch));
4647  DCHECK(!overflow_dst.is(scratch));
4648  DCHECK(!overflow_dst.is(left));
4649  DCHECK(!overflow_dst.is(right));
4650  DCHECK(!scratch.is(left));
4651  DCHECK(!scratch.is(right));
4652 
4653  // This happens with some crankshaft code. Since Subu works fine if
4654  // left == right, let's not make that restriction here.
4655  if (left.is(right)) {
4656  mov(dst, zero_reg);
4657  mov(overflow_dst, zero_reg);
4658  return;
4659  }
4660 
4661  if (dst.is(left)) {
4662  mov(scratch, left); // Preserve left.
4663  subu(dst, left, right); // Left is overwritten.
4664  xor_(overflow_dst, dst, scratch); // scratch is original left.
4665  xor_(scratch, scratch, right); // scratch is original left.
4666  and_(overflow_dst, scratch, overflow_dst);
4667  } else if (dst.is(right)) {
4668  mov(scratch, right); // Preserve right.
4669  subu(dst, left, right); // Right is overwritten.
4670  xor_(overflow_dst, dst, left);
4671  xor_(scratch, left, scratch); // Original right.
4672  and_(overflow_dst, scratch, overflow_dst);
4673  } else {
4674  subu(dst, left, right);
4675  xor_(overflow_dst, dst, left);
4676  xor_(scratch, left, right);
4677  and_(overflow_dst, scratch, overflow_dst);
4678  }
4679 }
4680 
4681 
4682 void MacroAssembler::CallRuntime(const Runtime::Function* f,
4683  int num_arguments,
4684  SaveFPRegsMode save_doubles) {
4685  // All parameters are on the stack. v0 has the return value after call.
4686 
4687  // If the expected number of arguments of the runtime function is
4688  // constant, we check that the actual number of arguments match the
4689  // expectation.
4690  CHECK(f->nargs < 0 || f->nargs == num_arguments);
4691 
4692  // TODO(1236192): Most runtime routines don't need the number of
4693  // arguments passed in because it is constant. At some point we
4694  // should remove this need and make the runtime routine entry code
4695  // smarter.
4696  PrepareCEntryArgs(num_arguments);
4697  PrepareCEntryFunction(ExternalReference(f, isolate()));
4698  CEntryStub stub(isolate(), 1, save_doubles);
4699  CallStub(&stub);
4700 }
4701 
4702 
4703 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
4704  int num_arguments,
4705  BranchDelaySlot bd) {
4706  PrepareCEntryArgs(num_arguments);
4707  PrepareCEntryFunction(ext);
4708 
4709  CEntryStub stub(isolate(), 1);
4710  CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
4711 }
4712 
4713 
4714 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
4715  int num_arguments,
4716  int result_size) {
4717  // TODO(1236192): Most runtime routines don't need the number of
4718  // arguments passed in because it is constant. At some point we
4719  // should remove this need and make the runtime routine entry code
4720  // smarter.
4721  PrepareCEntryArgs(num_arguments);
4722  JumpToExternalReference(ext);
4723 }
4724 
4725 
4726 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
4727  int num_arguments,
4728  int result_size) {
4729  TailCallExternalReference(ExternalReference(fid, isolate()),
4730  num_arguments,
4731  result_size);
4732 }
4733 
4734 
4735 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
4736  BranchDelaySlot bd) {
4737  PrepareCEntryFunction(builtin);
4738  CEntryStub stub(isolate(), 1);
4739  Jump(stub.GetCode(),
4740  RelocInfo::CODE_TARGET,
4741  al,
4742  zero_reg,
4743  Operand(zero_reg),
4744  bd);
4745 }
4746 
4747 
4748 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
4749  InvokeFlag flag,
4750  const CallWrapper& call_wrapper) {
4751  // You can't call a builtin without a valid frame.
4752  DCHECK(flag == JUMP_FUNCTION || has_frame());
4753 
4754  GetBuiltinEntry(t9, id);
4755  if (flag == CALL_FUNCTION) {
4756  call_wrapper.BeforeCall(CallSize(t9));
4757  Call(t9);
4758  call_wrapper.AfterCall();
4759  } else {
4761  Jump(t9);
4762  }
4763 }
4764 
4765 
4766 void MacroAssembler::GetBuiltinFunction(Register target,
4767  Builtins::JavaScript id) {
4768  // Load the builtins object into target register.
4769  lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4770  lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
4771  // Load the JavaScript builtin function from the builtins object.
4772  lw(target, FieldMemOperand(target,
4773  JSBuiltinsObject::OffsetOfFunctionWithId(id)));
4774 }
4775 
4776 
4777 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
4778  DCHECK(!target.is(a1));
4779  GetBuiltinFunction(a1, id);
4780  // Load the code entry point from the builtins object.
4781  lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4782 }
4783 
4784 
4785 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
4786  Register scratch1, Register scratch2) {
4787  if (FLAG_native_code_counters && counter->Enabled()) {
4788  li(scratch1, Operand(value));
4789  li(scratch2, Operand(ExternalReference(counter)));
4790  sw(scratch1, MemOperand(scratch2));
4791  }
4792 }
4793 
4794 
4795 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4796  Register scratch1, Register scratch2) {
4797  DCHECK(value > 0);
4798  if (FLAG_native_code_counters && counter->Enabled()) {
4799  li(scratch2, Operand(ExternalReference(counter)));
4800  lw(scratch1, MemOperand(scratch2));
4801  Addu(scratch1, scratch1, Operand(value));
4802  sw(scratch1, MemOperand(scratch2));
4803  }
4804 }
4805 
4806 
4807 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4808  Register scratch1, Register scratch2) {
4809  DCHECK(value > 0);
4810  if (FLAG_native_code_counters && counter->Enabled()) {
4811  li(scratch2, Operand(ExternalReference(counter)));
4812  lw(scratch1, MemOperand(scratch2));
4813  Subu(scratch1, scratch1, Operand(value));
4814  sw(scratch1, MemOperand(scratch2));
4815  }
4816 }
4817 
4818 
4819 // -----------------------------------------------------------------------------
4820 // Debugging.
4821 
4822 void MacroAssembler::Assert(Condition cc, BailoutReason reason,
4823  Register rs, Operand rt) {
4824  if (emit_debug_code())
4825  Check(cc, reason, rs, rt);
4826 }
4827 
4828 
4829 void MacroAssembler::AssertFastElements(Register elements) {
4830  if (emit_debug_code()) {
4831  DCHECK(!elements.is(at));
4832  Label ok;
4833  push(elements);
4834  lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
4835  LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4836  Branch(&ok, eq, elements, Operand(at));
4837  LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
4838  Branch(&ok, eq, elements, Operand(at));
4839  LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
4840  Branch(&ok, eq, elements, Operand(at));
4841  Abort(kJSObjectWithFastElementsMapHasSlowElements);
4842  bind(&ok);
4843  pop(elements);
4844  }
4845 }
4846 
4847 
4848 void MacroAssembler::Check(Condition cc, BailoutReason reason,
4849  Register rs, Operand rt) {
4850  Label L;
4851  Branch(&L, cc, rs, rt);
4852  Abort(reason);
4853  // Will not return here.
4854  bind(&L);
4855 }
4856 
4857 
4858 void MacroAssembler::Abort(BailoutReason reason) {
4859  Label abort_start;
4860  bind(&abort_start);
4861 #ifdef DEBUG
4862  const char* msg = GetBailoutReason(reason);
4863  if (msg != NULL) {
4864  RecordComment("Abort message: ");
4865  RecordComment(msg);
4866  }
4867 
4868  if (FLAG_trap_on_abort) {
4869  stop(msg);
4870  return;
4871  }
4872 #endif
4873 
4874  li(a0, Operand(Smi::FromInt(reason)));
4875  push(a0);
4876  // Disable stub call restrictions to always allow calls to abort.
4877  if (!has_frame_) {
4878  // We don't actually want to generate a pile of code for this, so just
4879  // claim there is a stack frame, without generating one.
4880  FrameScope scope(this, StackFrame::NONE);
4881  CallRuntime(Runtime::kAbort, 1);
4882  } else {
4883  CallRuntime(Runtime::kAbort, 1);
4884  }
4885  // Will not return here.
4886  if (is_trampoline_pool_blocked()) {
4887  // If the calling code cares about the exact number of
4888  // instructions generated, we insert padding here to keep the size
4889  // of the Abort macro constant.
4890  // Currently in debug mode with debug_code enabled the number of
4891  // generated instructions is 10, so we use this as a maximum value.
4892  static const int kExpectedAbortInstructions = 10;
4893  int abort_instructions = InstructionsGeneratedSince(&abort_start);
4894  DCHECK(abort_instructions <= kExpectedAbortInstructions);
4895  while (abort_instructions++ < kExpectedAbortInstructions) {
4896  nop();
4897  }
4898  }
4899 }
4900 
4901 
4902 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4903  if (context_chain_length > 0) {
4904  // Move up the chain of contexts to the context containing the slot.
4905  lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4906  for (int i = 1; i < context_chain_length; i++) {
4907  lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4908  }
4909  } else {
4910  // Slot is in the current function context. Move it into the
4911  // destination register in case we store into it (the write barrier
4912  // cannot be allowed to destroy the context in esi).
4913  Move(dst, cp);
4914  }
4915 }
4916 
4917 
4918 void MacroAssembler::LoadTransitionedArrayMapConditional(
4919  ElementsKind expected_kind,
4920  ElementsKind transitioned_kind,
4921  Register map_in_out,
4922  Register scratch,
4923  Label* no_map_match) {
4924  // Load the global or builtins object from the current context.
4925  lw(scratch,
4926  MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4927  lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
4928 
4929  // Check that the function's map is the same as the expected cached map.
4930  lw(scratch,
4931  MemOperand(scratch,
4932  Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4933  size_t offset = expected_kind * kPointerSize +
4934  FixedArrayBase::kHeaderSize;
4935  lw(at, FieldMemOperand(scratch, offset));
4936  Branch(no_map_match, ne, map_in_out, Operand(at));
4937 
4938  // Use the transitioned cached map.
4939  offset = transitioned_kind * kPointerSize +
4940  FixedArrayBase::kHeaderSize;
4941  lw(map_in_out, FieldMemOperand(scratch, offset));
4942 }
4943 
4944 
4945 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4946  // Load the global or builtins object from the current context.
4947  lw(function,
4948  MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4949  // Load the native context from the global or builtins object.
4950  lw(function, FieldMemOperand(function,
4951  GlobalObject::kNativeContextOffset));
4952  // Load the function from the native context.
4953  lw(function, MemOperand(function, Context::SlotOffset(index)));
4954 }
4955 
4956 
4957 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4958  Register map,
4959  Register scratch) {
4960  // Load the initial map. The global functions all have initial maps.
4961  lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4962  if (emit_debug_code()) {
4963  Label ok, fail;
4964  CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
4965  Branch(&ok);
4966  bind(&fail);
4967  Abort(kGlobalFunctionsMustHaveInitialMap);
4968  bind(&ok);
4969  }
4970 }
4971 
4972 
4973 void MacroAssembler::StubPrologue() {
4974  Push(ra, fp, cp);
4975  Push(Smi::FromInt(StackFrame::STUB));
4976  // Adjust FP to point to saved FP.
4977  Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4978 }
4979 
4980 
4981 void MacroAssembler::Prologue(bool code_pre_aging) {
4982  PredictableCodeSizeScope predictible_code_size_scope(
4983  this, kNoCodeAgeSequenceLength);
4984  // The following three instructions must remain together and unmodified
4985  // for code aging to work properly.
4986  if (code_pre_aging) {
4987  // Pre-age the code.
4988  Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
4989  nop(Assembler::CODE_AGE_MARKER_NOP);
4990  // Load the stub address to t9 and call it,
4991  // GetCodeAgeAndParity() extracts the stub address from this instruction.
4992  li(t9,
4993  Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
4994  CONSTANT_SIZE);
4995  nop(); // Prevent jalr to jal optimization.
4996  jalr(t9, a0);
4997  nop(); // Branch delay slot nop.
4998  nop(); // Pad the empty space.
4999  } else {
5000  Push(ra, fp, cp, a1);
5001  nop(Assembler::CODE_AGE_SEQUENCE_NOP);
5002  // Adjust fp to point to caller's fp.
5003  Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
5004  }
5005 }
5006 
5007 
5008 void MacroAssembler::EnterFrame(StackFrame::Type type) {
5009  addiu(sp, sp, -5 * kPointerSize);
5010  li(t8, Operand(Smi::FromInt(type)));
5011  li(t9, Operand(CodeObject()), CONSTANT_SIZE);
5012  sw(ra, MemOperand(sp, 4 * kPointerSize));
5013  sw(fp, MemOperand(sp, 3 * kPointerSize));
5014  sw(cp, MemOperand(sp, 2 * kPointerSize));
5015  sw(t8, MemOperand(sp, 1 * kPointerSize));
5016  sw(t9, MemOperand(sp, 0 * kPointerSize));
5017  // Adjust FP to point to saved FP.
5018  Addu(fp, sp,
5019  Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
5020 }
5021 
5022 
5023 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
5024  mov(sp, fp);
5025  lw(fp, MemOperand(sp, 0 * kPointerSize));
5026  lw(ra, MemOperand(sp, 1 * kPointerSize));
5027  addiu(sp, sp, 2 * kPointerSize);
5028 }
5029 
5030 
5031 void MacroAssembler::EnterExitFrame(bool save_doubles,
5032  int stack_space) {
5033  // Set up the frame structure on the stack.
5034  STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
5035  STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
5036  STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
5037 
5038  // This is how the stack will look:
5039  // fp + 2 (==kCallerSPDisplacement) - old stack's end
5040  // [fp + 1 (==kCallerPCOffset)] - saved old ra
5041  // [fp + 0 (==kCallerFPOffset)] - saved old fp
5042  // [fp - 1 (==kSPOffset)] - sp of the called function
5043  // [fp - 2 (==kCodeOffset)] - CodeObject
5044  // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
5045  // new stack (will contain saved ra)
5046 
5047  // Save registers.
5048  addiu(sp, sp, -4 * kPointerSize);
5049  sw(ra, MemOperand(sp, 3 * kPointerSize));
5050  sw(fp, MemOperand(sp, 2 * kPointerSize));
5051  addiu(fp, sp, 2 * kPointerSize); // Set up new frame pointer.
5052 
5053  if (emit_debug_code()) {
5054  sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
5055  }
5056 
5057  // Accessed from ExitFrame::code_slot.
5058  li(t8, Operand(CodeObject()), CONSTANT_SIZE);
5059  sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
5060 
5061  // Save the frame pointer and the context in top.
5062  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
5063  sw(fp, MemOperand(t8));
5064  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
5065  sw(cp, MemOperand(t8));
5066 
5067  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
5068  if (save_doubles) {
5069  // The stack must be allign to 0 modulo 8 for stores with sdc1.
5070  DCHECK(kDoubleSize == frame_alignment);
5071  if (frame_alignment > 0) {
5072  DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5073  And(sp, sp, Operand(-frame_alignment)); // Align stack.
5074  }
5075  int space = FPURegister::kMaxNumRegisters * kDoubleSize;
5076  Subu(sp, sp, Operand(space));
5077  // Remember: we only need to save every 2nd double FPU value.
5078  for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
5079  FPURegister reg = FPURegister::from_code(i);
5080  sdc1(reg, MemOperand(sp, i * kDoubleSize));
5081  }
5082  }
5083 
5084  // Reserve place for the return address, stack space and an optional slot
5085  // (used by the DirectCEntryStub to hold the return value if a struct is
5086  // returned) and align the frame preparing for calling the runtime function.
5087  DCHECK(stack_space >= 0);
5088  Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
5089  if (frame_alignment > 0) {
5090  DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5091  And(sp, sp, Operand(-frame_alignment)); // Align stack.
5092  }
5093 
5094  // Set the exit frame sp value to point just before the return address
5095  // location.
5096  addiu(at, sp, kPointerSize);
5097  sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
5098 }
5099 
5100 
5101 void MacroAssembler::LeaveExitFrame(bool save_doubles,
5102  Register argument_count,
5103  bool restore_context,
5104  bool do_return) {
5105  // Optionally restore all double registers.
5106  if (save_doubles) {
5107  // Remember: we only need to restore every 2nd double FPU value.
5108  lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
5109  for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
5110  FPURegister reg = FPURegister::from_code(i);
5111  ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
5112  }
5113  }
5114 
5115  // Clear top frame.
5116  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
5117  sw(zero_reg, MemOperand(t8));
5118 
5119  // Restore current context from top and clear it in debug mode.
5120  if (restore_context) {
5121  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
5122  lw(cp, MemOperand(t8));
5123  }
5124 #ifdef DEBUG
5125  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
5126  sw(a3, MemOperand(t8));
5127 #endif
5128 
5129  // Pop the arguments, restore registers, and return.
5130  mov(sp, fp); // Respect ABI stack constraint.
5131  lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
5132  lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
5133 
5134  if (argument_count.is_valid()) {
5135  sll(t8, argument_count, kPointerSizeLog2);
5136  addu(sp, sp, t8);
5137  }
5138 
5139  if (do_return) {
5140  Ret(USE_DELAY_SLOT);
5141  // If returning, the instruction in the delay slot will be the addiu below.
5142  }
5143  addiu(sp, sp, 8);
5144 }
5145 
5146 
5147 void MacroAssembler::InitializeNewString(Register string,
5148  Register length,
5149  Heap::RootListIndex map_index,
5150  Register scratch1,
5151  Register scratch2) {
5152  sll(scratch1, length, kSmiTagSize);
5153  LoadRoot(scratch2, map_index);
5154  sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
5155  li(scratch1, Operand(String::kEmptyHashField));
5156  sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
5157  sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
5158 }
5159 
5160 
5161 int MacroAssembler::ActivationFrameAlignment() {
5162 #if V8_HOST_ARCH_MIPS
5163  // Running on the real platform. Use the alignment as mandated by the local
5164  // environment.
5165  // Note: This will break if we ever start generating snapshots on one Mips
5166  // platform for another Mips platform with a different alignment.
5167  return base::OS::ActivationFrameAlignment();
5168 #else // V8_HOST_ARCH_MIPS
5169  // If we are using the simulator then we should always align to the expected
5170  // alignment. As the simulator is used to generate snapshots we do not know
5171  // if the target platform will need alignment, so this is controlled from a
5172  // flag.
5173  return FLAG_sim_stack_alignment;
5174 #endif // V8_HOST_ARCH_MIPS
5175 }
5176 
5177 
5178 void MacroAssembler::AssertStackIsAligned() {
5179  if (emit_debug_code()) {
5180  const int frame_alignment = ActivationFrameAlignment();
5181  const int frame_alignment_mask = frame_alignment - 1;
5182 
5183  if (frame_alignment > kPointerSize) {
5184  Label alignment_as_expected;
5185  DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5186  andi(at, sp, frame_alignment_mask);
5187  Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5188  // Don't use Check here, as it will call Runtime_Abort re-entering here.
5189  stop("Unexpected stack alignment");
5190  bind(&alignment_as_expected);
5191  }
5192  }
5193 }
5194 
5195 
5196 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
5197  Register reg,
5198  Register scratch,
5199  Label* not_power_of_two_or_zero) {
5200  Subu(scratch, reg, Operand(1));
5201  Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
5202  scratch, Operand(zero_reg));
5203  and_(at, scratch, reg); // In the delay slot.
5204  Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
5205 }
5206 
5207 
5208 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
5209  DCHECK(!reg.is(overflow));
5210  mov(overflow, reg); // Save original value.
5211  SmiTag(reg);
5212  xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
5213 }
5214 
5215 
5216 void MacroAssembler::SmiTagCheckOverflow(Register dst,
5217  Register src,
5218  Register overflow) {
5219  if (dst.is(src)) {
5220  // Fall back to slower case.
5221  SmiTagCheckOverflow(dst, overflow);
5222  } else {
5223  DCHECK(!dst.is(src));
5224  DCHECK(!dst.is(overflow));
5225  DCHECK(!src.is(overflow));
5226  SmiTag(dst, src);
5227  xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
5228  }
5229 }
5230 
5231 
5232 void MacroAssembler::UntagAndJumpIfSmi(Register dst,
5233  Register src,
5234  Label* smi_case) {
5235  JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
5236  SmiUntag(dst, src);
5237 }
5238 
5239 
5240 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
5241  Register src,
5242  Label* non_smi_case) {
5243  JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
5244  SmiUntag(dst, src);
5245 }
5246 
5247 void MacroAssembler::JumpIfSmi(Register value,
5248  Label* smi_label,
5249  Register scratch,
5250  BranchDelaySlot bd) {
5251  DCHECK_EQ(0, kSmiTag);
5252  andi(scratch, value, kSmiTagMask);
5253  Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
5254 }
5255 
5256 void MacroAssembler::JumpIfNotSmi(Register value,
5257  Label* not_smi_label,
5258  Register scratch,
5259  BranchDelaySlot bd) {
5260  DCHECK_EQ(0, kSmiTag);
5261  andi(scratch, value, kSmiTagMask);
5262  Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
5263 }
5264 
5265 
5266 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
5267  Register reg2,
5268  Label* on_not_both_smi) {
5269  STATIC_ASSERT(kSmiTag == 0);
5270  DCHECK_EQ(1, kSmiTagMask);
5271  or_(at, reg1, reg2);
5272  JumpIfNotSmi(at, on_not_both_smi);
5273 }
5274 
5275 
5276 void MacroAssembler::JumpIfEitherSmi(Register reg1,
5277  Register reg2,
5278  Label* on_either_smi) {
5279  STATIC_ASSERT(kSmiTag == 0);
5280  DCHECK_EQ(1, kSmiTagMask);
5281  // Both Smi tags must be 1 (not Smi).
5282  and_(at, reg1, reg2);
5283  JumpIfSmi(at, on_either_smi);
5284 }
5285 
5286 
5287 void MacroAssembler::AssertNotSmi(Register object) {
5288  if (emit_debug_code()) {
5289  STATIC_ASSERT(kSmiTag == 0);
5290  andi(at, object, kSmiTagMask);
5291  Check(ne, kOperandIsASmi, at, Operand(zero_reg));
5292  }
5293 }
5294 
5295 
5296 void MacroAssembler::AssertSmi(Register object) {
5297  if (emit_debug_code()) {
5298  STATIC_ASSERT(kSmiTag == 0);
5299  andi(at, object, kSmiTagMask);
5300  Check(eq, kOperandIsASmi, at, Operand(zero_reg));
5301  }
5302 }
5303 
5304 
5305 void MacroAssembler::AssertString(Register object) {
5306  if (emit_debug_code()) {
5307  STATIC_ASSERT(kSmiTag == 0);
5308  SmiTst(object, t0);
5309  Check(ne, kOperandIsASmiAndNotAString, t0, Operand(zero_reg));
5310  push(object);
5311  lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
5312  lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
5313  Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE));
5314  pop(object);
5315  }
5316 }
5317 
5318 
5319 void MacroAssembler::AssertName(Register object) {
5320  if (emit_debug_code()) {
5321  STATIC_ASSERT(kSmiTag == 0);
5322  SmiTst(object, t0);
5323  Check(ne, kOperandIsASmiAndNotAName, t0, Operand(zero_reg));
5324  push(object);
5325  lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
5326  lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
5327  Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE));
5328  pop(object);
5329  }
5330 }
5331 
5332 
5333 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
5334  Register scratch) {
5335  if (emit_debug_code()) {
5336  Label done_checking;
5337  AssertNotSmi(object);
5338  LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
5339  Branch(&done_checking, eq, object, Operand(scratch));
5340  push(object);
5341  lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
5342  LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
5343  Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch));
5344  pop(object);
5345  bind(&done_checking);
5346  }
5347 }
5348 
5349 
5350 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
5351  if (emit_debug_code()) {
5352  DCHECK(!reg.is(at));
5353  LoadRoot(at, index);
5354  Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
5355  }
5356 }
5357 
5358 
5359 void MacroAssembler::JumpIfNotHeapNumber(Register object,
5360  Register heap_number_map,
5361  Register scratch,
5362  Label* on_not_heap_number) {
5363  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
5364  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
5365  Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
5366 }
5367 
5368 
5369 void MacroAssembler::LookupNumberStringCache(Register object,
5370  Register result,
5371  Register scratch1,
5372  Register scratch2,
5373  Register scratch3,
5374  Label* not_found) {
5375  // Use of registers. Register result is used as a temporary.
5376  Register number_string_cache = result;
5377  Register mask = scratch3;
5378 
5379  // Load the number string cache.
5380  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
5381 
5382  // Make the hash mask from the length of the number string cache. It
5383  // contains two elements (number and string) for each cache entry.
5384  lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
5385  // Divide length by two (length is a smi).
5386  sra(mask, mask, kSmiTagSize + 1);
5387  Addu(mask, mask, -1); // Make mask.
5388 
5389  // Calculate the entry in the number string cache. The hash value in the
5390  // number string cache for smis is just the smi value, and the hash for
5391  // doubles is the xor of the upper and lower words. See
5392  // Heap::GetNumberStringCache.
5393  Label is_smi;
5394  Label load_result_from_cache;
5395  JumpIfSmi(object, &is_smi);
5396  CheckMap(object,
5397  scratch1,
5398  Heap::kHeapNumberMapRootIndex,
5399  not_found,
5401 
5402  STATIC_ASSERT(8 == kDoubleSize);
5403  Addu(scratch1,
5404  object,
5405  Operand(HeapNumber::kValueOffset - kHeapObjectTag));
5406  lw(scratch2, MemOperand(scratch1, kPointerSize));
5407  lw(scratch1, MemOperand(scratch1, 0));
5408  Xor(scratch1, scratch1, Operand(scratch2));
5409  And(scratch1, scratch1, Operand(mask));
5410 
5411  // Calculate address of entry in string cache: each entry consists
5412  // of two pointer sized fields.
5413  sll(scratch1, scratch1, kPointerSizeLog2 + 1);
5414  Addu(scratch1, number_string_cache, scratch1);
5415 
5416  Register probe = mask;
5417  lw(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
5418  JumpIfSmi(probe, not_found);
5419  ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
5420  ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
5421  BranchF(&load_result_from_cache, NULL, eq, f12, f14);
5422  Branch(not_found);
5423 
5424  bind(&is_smi);
5425  Register scratch = scratch1;
5426  sra(scratch, object, 1); // Shift away the tag.
5427  And(scratch, mask, Operand(scratch));
5428 
5429  // Calculate address of entry in string cache: each entry consists
5430  // of two pointer sized fields.
5431  sll(scratch, scratch, kPointerSizeLog2 + 1);
5432  Addu(scratch, number_string_cache, scratch);
5433 
5434  // Check if the entry is the smi we are looking for.
5435  lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
5436  Branch(not_found, ne, object, Operand(probe));
5437 
5438  // Get the result from the cache.
5439  bind(&load_result_from_cache);
5440  lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
5441 
5442  IncrementCounter(isolate()->counters()->number_to_string_native(),
5443  1,
5444  scratch1,
5445  scratch2);
5446 }
5447 
5448 
5449 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
5450  Register first, Register second, Register scratch1, Register scratch2,
5451  Label* failure) {
5452  // Test that both first and second are sequential one-byte strings.
5453  // Assume that they are non-smis.
5454  lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
5455  lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
5456  lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
5457  lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
5458 
5459  JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
5460  scratch2, failure);
5461 }
5462 
5463 
5464 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
5465  Register second,
5466  Register scratch1,
5467  Register scratch2,
5468  Label* failure) {
5469  // Check that neither is a smi.
5470  STATIC_ASSERT(kSmiTag == 0);
5471  And(scratch1, first, Operand(second));
5472  JumpIfSmi(scratch1, failure);
5473  JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
5474  scratch2, failure);
5475 }
5476 
5477 
5478 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
5479  Register first, Register second, Register scratch1, Register scratch2,
5480  Label* failure) {
5481  const int kFlatOneByteStringMask =
5483  const int kFlatOneByteStringTag =
5485  DCHECK(kFlatOneByteStringTag <= 0xffff); // Ensure this fits 16-bit immed.
5486  andi(scratch1, first, kFlatOneByteStringMask);
5487  Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
5488  andi(scratch2, second, kFlatOneByteStringMask);
5489  Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
5490 }
5491 
5492 
5493 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
5494  Register scratch,
5495  Label* failure) {
5496  const int kFlatOneByteStringMask =
5498  const int kFlatOneByteStringTag =
5500  And(scratch, type, Operand(kFlatOneByteStringMask));
5501  Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
5502 }
5503 
5504 
5505 static const int kRegisterPassedArguments = 4;
5506 
5507 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
5508  int num_double_arguments) {
5509  int stack_passed_words = 0;
5510  num_reg_arguments += 2 * num_double_arguments;
5511 
5512  // Up to four simple arguments are passed in registers a0..a3.
5513  if (num_reg_arguments > kRegisterPassedArguments) {
5514  stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
5515  }
5516  stack_passed_words += kCArgSlotCount;
5517  return stack_passed_words;
5518 }
5519 
5520 
5521 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
5522  Register index,
5523  Register value,
5524  Register scratch,
5525  uint32_t encoding_mask) {
5526  Label is_object;
5527  SmiTst(string, at);
5528  Check(ne, kNonObject, at, Operand(zero_reg));
5529 
5530  lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
5531  lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
5532 
5534  li(scratch, Operand(encoding_mask));
5535  Check(eq, kUnexpectedStringType, at, Operand(scratch));
5536 
5537  // The index is assumed to be untagged coming in, tag it to compare with the
5538  // string length without using a temp register, it is restored at the end of
5539  // this function.
5540  Label index_tag_ok, index_tag_bad;
5541  TrySmiTag(index, scratch, &index_tag_bad);
5542  Branch(&index_tag_ok);
5543  bind(&index_tag_bad);
5544  Abort(kIndexIsTooLarge);
5545  bind(&index_tag_ok);
5546 
5547  lw(at, FieldMemOperand(string, String::kLengthOffset));
5548  Check(lt, kIndexIsTooLarge, index, Operand(at));
5549 
5550  DCHECK(Smi::FromInt(0) == 0);
5551  Check(ge, kIndexIsNegative, index, Operand(zero_reg));
5552 
5553  SmiUntag(index, index);
5554 }
5555 
5556 
5557 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5558  int num_double_arguments,
5559  Register scratch) {
5560  int frame_alignment = ActivationFrameAlignment();
5561 
5562  // Up to four simple arguments are passed in registers a0..a3.
5563  // Those four arguments must have reserved argument slots on the stack for
5564  // mips, even though those argument slots are not normally used.
5565  // Remaining arguments are pushed on the stack, above (higher address than)
5566  // the argument slots.
5567  int stack_passed_arguments = CalculateStackPassedWords(
5568  num_reg_arguments, num_double_arguments);
5569  if (frame_alignment > kPointerSize) {
5570  // Make stack end at alignment and make room for num_arguments - 4 words
5571  // and the original value of sp.
5572  mov(scratch, sp);
5573  Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
5574  DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5575  And(sp, sp, Operand(-frame_alignment));
5576  sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
5577  } else {
5578  Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
5579  }
5580 }
5581 
5582 
5583 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5584  Register scratch) {
5585  PrepareCallCFunction(num_reg_arguments, 0, scratch);
5586 }
5587 
5588 
5589 void MacroAssembler::CallCFunction(ExternalReference function,
5590  int num_reg_arguments,
5591  int num_double_arguments) {
5592  li(t8, Operand(function));
5593  CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
5594 }
5595 
5596 
5597 void MacroAssembler::CallCFunction(Register function,
5598  int num_reg_arguments,
5599  int num_double_arguments) {
5600  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
5601 }
5602 
5603 
5604 void MacroAssembler::CallCFunction(ExternalReference function,
5605  int num_arguments) {
5606  CallCFunction(function, num_arguments, 0);
5607 }
5608 
5609 
5610 void MacroAssembler::CallCFunction(Register function,
5611  int num_arguments) {
5612  CallCFunction(function, num_arguments, 0);
5613 }
5614 
5615 
5616 void MacroAssembler::CallCFunctionHelper(Register function,
5617  int num_reg_arguments,
5618  int num_double_arguments) {
5619  DCHECK(has_frame());
5620  // Make sure that the stack is aligned before calling a C function unless
5621  // running in the simulator. The simulator has its own alignment check which
5622  // provides more information.
5623  // The argument stots are presumed to have been set up by
5624  // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
5625 
5626 #if V8_HOST_ARCH_MIPS
5627  if (emit_debug_code()) {
5628  int frame_alignment = base::OS::ActivationFrameAlignment();
5629  int frame_alignment_mask = frame_alignment - 1;
5630  if (frame_alignment > kPointerSize) {
5631  DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5632  Label alignment_as_expected;
5633  And(at, sp, Operand(frame_alignment_mask));
5634  Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5635  // Don't use Check here, as it will call Runtime_Abort possibly
5636  // re-entering here.
5637  stop("Unexpected alignment in CallCFunction");
5638  bind(&alignment_as_expected);
5639  }
5640  }
5641 #endif // V8_HOST_ARCH_MIPS
5642 
5643  // Just call directly. The function called cannot cause a GC, or
5644  // allow preemption, so the return address in the link register
5645  // stays correct.
5646 
5647  if (!function.is(t9)) {
5648  mov(t9, function);
5649  function = t9;
5650  }
5651 
5652  Call(function);
5653 
5654  int stack_passed_arguments = CalculateStackPassedWords(
5655  num_reg_arguments, num_double_arguments);
5656 
5657  if (base::OS::ActivationFrameAlignment() > kPointerSize) {
5658  lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
5659  } else {
5660  Addu(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
5661  }
5662 }
5663 
5664 
5665 #undef BRANCH_ARGS_CHECK
5666 
5667 
5668 void MacroAssembler::PatchRelocatedValue(Register li_location,
5669  Register scratch,
5670  Register new_value) {
5671  lw(scratch, MemOperand(li_location));
5672  // At this point scratch is a lui(at, ...) instruction.
5673  if (emit_debug_code()) {
5674  And(scratch, scratch, kOpcodeMask);
5675  Check(eq, kTheInstructionToPatchShouldBeALui,
5676  scratch, Operand(LUI));
5677  lw(scratch, MemOperand(li_location));
5678  }
5679  srl(t9, new_value, kImm16Bits);
5680  Ins(scratch, t9, 0, kImm16Bits);
5681  sw(scratch, MemOperand(li_location));
5682 
5683  lw(scratch, MemOperand(li_location, kInstrSize));
5684  // scratch is now ori(at, ...).
5685  if (emit_debug_code()) {
5686  And(scratch, scratch, kOpcodeMask);
5687  Check(eq, kTheInstructionToPatchShouldBeAnOri,
5688  scratch, Operand(ORI));
5689  lw(scratch, MemOperand(li_location, kInstrSize));
5690  }
5691  Ins(scratch, new_value, 0, kImm16Bits);
5692  sw(scratch, MemOperand(li_location, kInstrSize));
5693 
5694  // Update the I-cache so the new lui and ori can be executed.
5695  FlushICache(li_location, 2);
5696 }
5697 
5698 void MacroAssembler::GetRelocatedValue(Register li_location,
5699  Register value,
5700  Register scratch) {
5701  lw(value, MemOperand(li_location));
5702  if (emit_debug_code()) {
5703  And(value, value, kOpcodeMask);
5704  Check(eq, kTheInstructionShouldBeALui,
5705  value, Operand(LUI));
5706  lw(value, MemOperand(li_location));
5707  }
5708 
5709  // value now holds a lui instruction. Extract the immediate.
5710  sll(value, value, kImm16Bits);
5711 
5712  lw(scratch, MemOperand(li_location, kInstrSize));
5713  if (emit_debug_code()) {
5714  And(scratch, scratch, kOpcodeMask);
5715  Check(eq, kTheInstructionShouldBeAnOri,
5716  scratch, Operand(ORI));
5717  lw(scratch, MemOperand(li_location, kInstrSize));
5718  }
5719  // "scratch" now holds an ori instruction. Extract the immediate.
5720  andi(scratch, scratch, kImm16Mask);
5721 
5722  // Merge the results.
5723  or_(value, value, scratch);
5724 }
5725 
5726 
5727 void MacroAssembler::CheckPageFlag(
5728  Register object,
5729  Register scratch,
5730  int mask,
5731  Condition cc,
5732  Label* condition_met) {
5733  And(scratch, object, Operand(~Page::kPageAlignmentMask));
5734  lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
5735  And(scratch, scratch, Operand(mask));
5736  Branch(condition_met, cc, scratch, Operand(zero_reg));
5737 }
5738 
5739 
5740 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
5741  Register scratch,
5742  Label* if_deprecated) {
5743  if (map->CanBeDeprecated()) {
5744  li(scratch, Operand(map));
5745  lw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
5746  And(scratch, scratch, Operand(Map::Deprecated::kMask));
5747  Branch(if_deprecated, ne, scratch, Operand(zero_reg));
5748  }
5749 }
5750 
5751 
5752 void MacroAssembler::JumpIfBlack(Register object,
5753  Register scratch0,
5754  Register scratch1,
5755  Label* on_black) {
5756  HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
5757  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
5758 }
5759 
5760 
5761 void MacroAssembler::HasColor(Register object,
5762  Register bitmap_scratch,
5763  Register mask_scratch,
5764  Label* has_color,
5765  int first_bit,
5766  int second_bit) {
5767  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
5768  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
5769 
5770  GetMarkBits(object, bitmap_scratch, mask_scratch);
5771 
5772  Label other_color, word_boundary;
5773  lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5774  And(t8, t9, Operand(mask_scratch));
5775  Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
5776  // Shift left 1 by adding.
5777  Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
5778  Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
5779  And(t8, t9, Operand(mask_scratch));
5780  Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
5781  jmp(&other_color);
5782 
5783  bind(&word_boundary);
5784  lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
5785  And(t9, t9, Operand(1));
5786  Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
5787  bind(&other_color);
5788 }
5789 
5790 
5791 // Detect some, but not all, common pointer-free objects. This is used by the
5792 // incremental write barrier which doesn't care about oddballs (they are always
5793 // marked black immediately so this code is not hit).
5794 void MacroAssembler::JumpIfDataObject(Register value,
5795  Register scratch,
5796  Label* not_data_object) {
5797  DCHECK(!AreAliased(value, scratch, t8, no_reg));
5798  Label is_data_object;
5799  lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
5800  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5801  Branch(&is_data_object, eq, t8, Operand(scratch));
5803  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5804  // If it's a string and it's not a cons string then it's an object containing
5805  // no GC pointers.
5806  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5807  And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
5808  Branch(not_data_object, ne, t8, Operand(zero_reg));
5809  bind(&is_data_object);
5810 }
5811 
5812 
5813 void MacroAssembler::GetMarkBits(Register addr_reg,
5814  Register bitmap_reg,
5815  Register mask_reg) {
5816  DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
5817  And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
5818  Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
5819  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
5820  Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
5821  sll(t8, t8, kPointerSizeLog2);
5822  Addu(bitmap_reg, bitmap_reg, t8);
5823  li(t8, Operand(1));
5824  sllv(mask_reg, t8, mask_reg);
5825 }
5826 
5827 
5828 void MacroAssembler::EnsureNotWhite(
5829  Register value,
5830  Register bitmap_scratch,
5831  Register mask_scratch,
5832  Register load_scratch,
5833  Label* value_is_white_and_not_data) {
5834  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
5835  GetMarkBits(value, bitmap_scratch, mask_scratch);
5836 
5837  // If the value is black or grey we don't need to do anything.
5838  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
5839  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
5840  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
5841  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
5842 
5843  Label done;
5844 
5845  // Since both black and grey have a 1 in the first position and white does
5846  // not have a 1 there we only need to check one bit.
5847  lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5848  And(t8, mask_scratch, load_scratch);
5849  Branch(&done, ne, t8, Operand(zero_reg));
5850 
5851  if (emit_debug_code()) {
5852  // Check for impossible bit pattern.
5853  Label ok;
5854  // sll may overflow, making the check conservative.
5855  sll(t8, mask_scratch, 1);
5856  And(t8, load_scratch, t8);
5857  Branch(&ok, eq, t8, Operand(zero_reg));
5858  stop("Impossible marking bit pattern");
5859  bind(&ok);
5860  }
5861 
5862  // Value is white. We check whether it is data that doesn't need scanning.
5863  // Currently only checks for HeapNumber and non-cons strings.
5864  Register map = load_scratch; // Holds map while checking type.
5865  Register length = load_scratch; // Holds length of object after testing type.
5866  Label is_data_object;
5867 
5868  // Check for heap-number
5869  lw(map, FieldMemOperand(value, HeapObject::kMapOffset));
5870  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5871  {
5872  Label skip;
5873  Branch(&skip, ne, t8, Operand(map));
5874  li(length, HeapNumber::kSize);
5875  Branch(&is_data_object);
5876  bind(&skip);
5877  }
5878 
5879  // Check for strings.
5881  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5882  // If it's a string and it's not a cons string then it's an object containing
5883  // no GC pointers.
5884  Register instance_type = load_scratch;
5885  lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
5886  And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
5887  Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
5888  // It's a non-indirect (non-cons and non-slice) string.
5889  // If it's external, the length is just ExternalString::kSize.
5890  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
5891  // External strings are the only ones with the kExternalStringTag bit
5892  // set.
5895  And(t8, instance_type, Operand(kExternalStringTag));
5896  {
5897  Label skip;
5898  Branch(&skip, eq, t8, Operand(zero_reg));
5899  li(length, ExternalString::kSize);
5900  Branch(&is_data_object);
5901  bind(&skip);
5902  }
5903 
5904  // Sequential string, either Latin1 or UC16.
5905  // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
5906  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
5907  // getting the length multiplied by 2.
5909  DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
5910  lw(t9, FieldMemOperand(value, String::kLengthOffset));
5911  And(t8, instance_type, Operand(kStringEncodingMask));
5912  {
5913  Label skip;
5914  Branch(&skip, eq, t8, Operand(zero_reg));
5915  srl(t9, t9, 1);
5916  bind(&skip);
5917  }
5918  Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
5919  And(length, length, Operand(~kObjectAlignmentMask));
5920 
5921  bind(&is_data_object);
5922  // Value is a data object, and it is white. Mark it black. Since we know
5923  // that the object is white we can make it black by flipping one bit.
5924  lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5925  Or(t8, t8, Operand(mask_scratch));
5926  sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5927 
5928  And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
5929  lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5930  Addu(t8, t8, Operand(length));
5931  sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5932 
5933  bind(&done);
5934 }
5935 
5936 
5937 void MacroAssembler::LoadInstanceDescriptors(Register map,
5938  Register descriptors) {
5939  lw(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
5940 }
5941 
5942 
5943 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
5944  lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
5945  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
5946 }
5947 
5948 
5949 void MacroAssembler::EnumLength(Register dst, Register map) {
5950  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
5951  lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
5952  And(dst, dst, Operand(Map::EnumLengthBits::kMask));
5953  SmiTag(dst);
5954 }
5955 
5956 
5957 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
5958  Register empty_fixed_array_value = t2;
5959  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
5960  Label next, start;
5961  mov(a2, a0);
5962 
5963  // Check if the enum length field is properly initialized, indicating that
5964  // there is an enum cache.
5965  lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5966 
5967  EnumLength(a3, a1);
5968  Branch(
5969  call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
5970 
5971  jmp(&start);
5972 
5973  bind(&next);
5974  lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5975 
5976  // For all objects but the receiver, check that the cache is empty.
5977  EnumLength(a3, a1);
5978  Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
5979 
5980  bind(&start);
5981 
5982  // Check that there are no elements. Register a2 contains the current JS
5983  // object we've reached through the prototype chain.
5984  Label no_elements;
5985  lw(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
5986  Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));
5987 
5988  // Second chance, the object may be using the empty slow element dictionary.
5989  LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
5990  Branch(call_runtime, ne, a2, Operand(at));
5991 
5992  bind(&no_elements);
5993  lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
5994  Branch(&next, ne, a2, Operand(null_value));
5995 }
5996 
5997 
5998 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
5999  DCHECK(!output_reg.is(input_reg));
6000  Label done;
6001  li(output_reg, Operand(255));
6002  // Normal branch: nop in delay slot.
6003  Branch(&done, gt, input_reg, Operand(output_reg));
6004  // Use delay slot in this branch.
6005  Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
6006  mov(output_reg, zero_reg); // In delay slot.
6007  mov(output_reg, input_reg); // Value is in range 0..255.
6008  bind(&done);
6009 }
6010 
6011 
6012 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
6013  DoubleRegister input_reg,
6014  DoubleRegister temp_double_reg) {
6015  Label above_zero;
6016  Label done;
6017  Label in_bounds;
6018 
6019  Move(temp_double_reg, 0.0);
6020  BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
6021 
6022  // Double value is less than zero, NaN or Inf, return 0.
6023  mov(result_reg, zero_reg);
6024  Branch(&done);
6025 
6026  // Double value is >= 255, return 255.
6027  bind(&above_zero);
6028  Move(temp_double_reg, 255.0);
6029  BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
6030  li(result_reg, Operand(255));
6031  Branch(&done);
6032 
6033  // In 0-255 range, round and truncate.
6034  bind(&in_bounds);
6035  cvt_w_d(temp_double_reg, input_reg);
6036  mfc1(result_reg, temp_double_reg);
6037  bind(&done);
6038 }
6039 
6040 
6041 void MacroAssembler::TestJSArrayForAllocationMemento(
6042  Register receiver_reg,
6043  Register scratch_reg,
6044  Label* no_memento_found,
6045  Condition cond,
6046  Label* allocation_memento_present) {
6047  ExternalReference new_space_start =
6048  ExternalReference::new_space_start(isolate());
6049  ExternalReference new_space_allocation_top =
6050  ExternalReference::new_space_allocation_top_address(isolate());
6051  Addu(scratch_reg, receiver_reg,
6052  Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
6053  Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
6054  li(at, Operand(new_space_allocation_top));
6055  lw(at, MemOperand(at));
6056  Branch(no_memento_found, gt, scratch_reg, Operand(at));
6057  lw(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
6058  if (allocation_memento_present) {
6059  Branch(allocation_memento_present, cond, scratch_reg,
6060  Operand(isolate()->factory()->allocation_memento_map()));
6061  }
6062 }
6063 
6064 
6065 Register GetRegisterThatIsNotOneOf(Register reg1,
6066  Register reg2,
6067  Register reg3,
6068  Register reg4,
6069  Register reg5,
6070  Register reg6) {
6071  RegList regs = 0;
6072  if (reg1.is_valid()) regs |= reg1.bit();
6073  if (reg2.is_valid()) regs |= reg2.bit();
6074  if (reg3.is_valid()) regs |= reg3.bit();
6075  if (reg4.is_valid()) regs |= reg4.bit();
6076  if (reg5.is_valid()) regs |= reg5.bit();
6077  if (reg6.is_valid()) regs |= reg6.bit();
6078 
6079  for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
6080  Register candidate = Register::FromAllocationIndex(i);
6081  if (regs & candidate.bit()) continue;
6082  return candidate;
6083  }
6084  UNREACHABLE();
6085  return no_reg;
6086 }
6087 
6088 
6089 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
6090  Register object,
6091  Register scratch0,
6092  Register scratch1,
6093  Label* found) {
6094  DCHECK(!scratch1.is(scratch0));
6095  Factory* factory = isolate()->factory();
6096  Register current = scratch0;
6097  Label loop_again;
6098 
6099  // Scratch contained elements pointer.
6100  Move(current, object);
6101 
6102  // Loop based on the map going up the prototype chain.
6103  bind(&loop_again);
6104  lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
6105  lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
6106  DecodeField<Map::ElementsKindBits>(scratch1);
6107  Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
6108  lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
6109  Branch(&loop_again, ne, current, Operand(factory->null_value()));
6110 }
6111 
6112 
6113 bool AreAliased(Register reg1,
6114  Register reg2,
6115  Register reg3,
6116  Register reg4,
6117  Register reg5,
6118  Register reg6,
6119  Register reg7,
6120  Register reg8) {
6121  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
6122  reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
6123  reg7.is_valid() + reg8.is_valid();
6124 
6125  RegList regs = 0;
6126  if (reg1.is_valid()) regs |= reg1.bit();
6127  if (reg2.is_valid()) regs |= reg2.bit();
6128  if (reg3.is_valid()) regs |= reg3.bit();
6129  if (reg4.is_valid()) regs |= reg4.bit();
6130  if (reg5.is_valid()) regs |= reg5.bit();
6131  if (reg6.is_valid()) regs |= reg6.bit();
6132  if (reg7.is_valid()) regs |= reg7.bit();
6133  if (reg8.is_valid()) regs |= reg8.bit();
6134  int n_of_non_aliasing_regs = NumRegs(regs);
6135 
6136  return n_of_valid_regs != n_of_non_aliasing_regs;
6137 }
6138 
6139 
6140 CodePatcher::CodePatcher(byte* address,
6141  int instructions,
6142  FlushICache flush_cache)
6143  : address_(address),
6144  size_(instructions * Assembler::kInstrSize),
6145  masm_(NULL, address, size_ + Assembler::kGap),
6146  flush_cache_(flush_cache) {
6147  // Create a new macro assembler pointing to the address of the code to patch.
6148  // The size is adjusted with kGap on order for the assembler to generate size
6149  // bytes of instructions without failing with buffer size constraints.
6150  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
6151 }
6152 
6153 
6154 CodePatcher::~CodePatcher() {
6155  // Indicate that code has changed.
6156  if (flush_cache_ == FLUSH) {
6157  CpuFeatures::FlushICache(address_, size_);
6158  }
6159 
6160  // Check that the code was patched as expected.
6161  DCHECK(masm_.pc_ == address_ + size_);
6162  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
6163 }
6164 
6165 
6166 void CodePatcher::Emit(Instr instr) {
6167  masm()->emit(instr);
6168 }
6169 
6170 
6171 void CodePatcher::Emit(Address addr) {
6172  masm()->emit(reinterpret_cast<Instr>(addr));
6173 }
6174 
6175 
6176 void CodePatcher::ChangeBranchCondition(Condition cond) {
6177  Instr instr = Assembler::instr_at(masm_.pc_);
6178  DCHECK(Assembler::IsBranch(instr));
6179  uint32_t opcode = Assembler::GetOpcodeField(instr);
6180  // Currently only the 'eq' and 'ne' cond values are supported and the simple
6181  // branch instructions (with opcode being the branch type).
6182  // There are some special cases (see Assembler::IsBranch()) so extending this
6183  // would be tricky.
6184  DCHECK(opcode == BEQ ||
6185  opcode == BNE ||
6186  opcode == BLEZ ||
6187  opcode == BGTZ ||
6188  opcode == BEQL ||
6189  opcode == BNEL ||
6190  opcode == BLEZL ||
6191  opcode == BGTZL);
6192  opcode = (cond == eq) ? BEQ : BNE;
6193  instr = (instr & ~kOpcodeMask) | opcode;
6194  masm_.emit(instr);
6195 }
6196 
6197 
6198 void MacroAssembler::TruncatingDiv(Register result,
6199  Register dividend,
6200  int32_t divisor) {
6201  DCHECK(!dividend.is(result));
6202  DCHECK(!dividend.is(at));
6203  DCHECK(!result.is(at));
6204  base::MagicNumbersForDivision<uint32_t> mag =
6205  base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
6206  li(at, Operand(mag.multiplier));
6207  Mulh(result, dividend, Operand(at));
6208  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
6209  if (divisor > 0 && neg) {
6210  Addu(result, result, Operand(dividend));
6211  }
6212  if (divisor < 0 && !neg && mag.multiplier > 0) {
6213  Subu(result, result, Operand(dividend));
6214  }
6215  if (mag.shift > 0) sra(result, result, mag.shift);
6216  srl(at, dividend, 31);
6217  Addu(result, result, Operand(at));
6218 }
6219 
6220 
6221 } } // namespace v8::internal
6222 
6223 #endif // V8_TARGET_ARCH_MIPS
#define kDoubleRegZero
#define kDoubleCompareReg
#define cp
#define kLithiumScratchDouble
const int kPageSizeBits
Definition: build_config.h:159
MacroAssembler(Isolate *isolate, void *buffer, int size)
const bool IsMipsSoftFloatABI
#define UNIMPLEMENTED_MIPS()
@ kLittle
@ kLoongson
@ kMips32r6
@ kMips32r2
#define IsFp64Mode()
#define IsMipsArchVariant(check)
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf map
enable harmony numeric enable harmony object literal extensions Optimize object size
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_BOOL(enable_always_align_csp
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be expose gc extension under the specified name show built in functions in stack traces use random jit cookie to mask large constants minimum length for automatic enable preparsing CPU profiler sampling interval in microseconds trace out of bounds accesses to external arrays default size of stack region v8 is allowed to maximum length of function source code printed in a stack trace min size of a semi space(in MBytes)
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be NULL
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be aligned(ARM64 only)") DEFINE_STRING(expose_gc_as
#define kNumSafepointSavedRegisters
Definition: frames-arm64.h:28
#define kSafepointSavedRegisters
Definition: frames-arm64.h:27
#define UNREACHABLE()
Definition: logging.h:30
#define CHECK(condition)
Definition: logging.h:36
#define DCHECK(condition)
Definition: logging.h:205
#define DCHECK_EQ(v1, v2)
Definition: logging.h:206
InvokeFlag
@ JUMP_FUNCTION
@ CALL_FUNCTION
AllocationFlags
@ RESULT_CONTAINS_TOP
@ DOUBLE_ALIGNMENT
@ SIZE_IN_WORDS
@ PRETENURE_OLD_POINTER_SPACE
@ NO_ALLOCATION_FLAGS
@ TAG_OBJECT
@ PRETENURE_OLD_DATA_SPACE
#define STATIC_ASSERT(test)
Definition: macros.h:311
unsigned short uint16_t
Definition: unicode.cc:23
signed short int16_t
Definition: unicode.cc:22
int int32_t
Definition: unicode.cc:24
bool IsPowerOfTwo32(uint32_t value)
Definition: bits.h:77
MagicNumbersForDivision< T > SignedDivisionByConstant(T d)
Matcher< Node * > IsBranch(const Matcher< Node * > &value_matcher, const Matcher< Node * > &control_matcher)
static int Push(SpecialRPOStackFrame *stack, int depth, BasicBlock *child, int unvisited)
Definition: scheduler.cc:773
const intptr_t kHeapObjectTagMask
Definition: v8.h:5739
const int kPointerSize
Definition: globals.h:129
const FPURegister f14
const uint32_t kStringEncodingMask
Definition: objects.h:555
const Register r2
@ DONT_DO_SMI_CHECK
Definition: globals.h:640
@ DO_SMI_CHECK
Definition: globals.h:641
const int kImm16Bits
const FPUControlRegister FCSR
@ kSeqStringTag
Definition: objects.h:563
@ kConsStringTag
Definition: objects.h:564
@ kExternalStringTag
Definition: objects.h:565
const RegList kJSCallerSaved
Definition: frames-arm.h:24
bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)
const int kSafepointRegisterStackIndexMap[kNumRegs]
Definition: frames-mips.h:92
TypeImpl< ZoneTypeConfig > Type
@ kDontCheckForInexactConversion
const SwVfpRegister s1
const int kSmiTagSize
Definition: v8.h:5743
const SwVfpRegister s2
const int kDoubleSizeLog2
Definition: globals.h:138
const int kNumSafepointRegisters
Definition: frames-arm.h:67
const int kDoubleSize
Definition: globals.h:127
const FPURegister f2
const uint32_t kNotStringTag
Definition: objects.h:545
const SwVfpRegister s0
const Register fp
DwVfpRegister DoubleRegister
const uint32_t kFCSRUnderflowFlagMask
const Address kZapValue
Definition: globals.h:269
const Register sp
const int kPointerSizeLog2
Definition: globals.h:147
const uint32_t kStringTag
Definition: objects.h:544
@ LAST_NONCALLABLE_SPEC_OBJECT_TYPE
Definition: objects.h:785
@ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE
Definition: objects.h:784
@ FIRST_NONSTRING_TYPE
Definition: objects.h:758
@ LAST_NAME_TYPE
Definition: objects.h:755
@ JS_FUNCTION_TYPE
Definition: objects.h:749
@ FAST_HOLEY_SMI_ELEMENTS
Definition: elements-kind.h:17
Handle< T > handle(T *t, Isolate *isolate)
Definition: handles.h:146
const uint32_t kOneByteStringTag
Definition: objects.h:557
MemOperand FieldMemOperand(Register object, int offset)
const intptr_t kObjectAlignmentMask
Definition: globals.h:227
const FPURegister f12
const SwVfpRegister s3
const uint32_t kFCSRInvalidOpFlagMask
int NumRegs(RegList reglist)
Definition: frames.cc:1582
static const int kInvalidEnumCacheSentinel
const char * GetBailoutReason(BailoutReason reason)
Condition NegateCondition(Condition cond)
Definition: constants-arm.h:86
const uint32_t kStringRepresentationMask
Definition: objects.h:561
uint32_t RegList
Definition: frames.h:18
const SwVfpRegister s6
byte * Address
Definition: globals.h:101
const uint32_t kFCSROverflowFlagMask
const Register r1
const int kImm28Mask
const int kHiMask
const int kHeapObjectTag
Definition: v8.h:5737
const Register no_reg
const uint32_t kIsIndirectStringTag
Definition: objects.h:569
int TenToThe(int exponent)
Definition: utils.h:733
kFeedbackVectorOffset flag
Definition: objects-inl.h:5418
Register ToRegister(int num)
const uint32_t kInternalizedTag
Definition: objects.h:551
static const int kNumberDictionaryProbes
Definition: codegen.h:149
const uint32_t kFCSRInexactFlagMask
const intptr_t kSmiTagMask
Definition: v8.h:5744
const uint32_t kIsNotInternalizedMask
Definition: objects.h:549
const int kOpcodeMask
const uint32_t kNaNOrInfinityLowerBoundUpper32
Definition: globals.h:658
const int kLuiShift
const int kSmiTag
Definition: v8.h:5742
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2=no_reg, Register reg3=no_reg, Register reg4=no_reg, Register reg5=no_reg, Register reg6=no_reg)
static const int kNoCodeAgeSequenceLength
const uint32_t kIsNotStringMask
Definition: objects.h:543
bool IsAligned(T value, U alignment)
Definition: utils.h:123
int NumberOfBitsSet(uint32_t x)
Definition: assembler.h:1073
const int kCharSize
Definition: globals.h:122
const intptr_t kDoubleAlignment
Definition: globals.h:234
const uint32_t kFCSRFlagMask
@ kPointersToHereAreAlwaysInteresting
const intptr_t kPointerAlignment
Definition: globals.h:230
void CopyBytes(uint8_t *target, uint8_t *source)
const intptr_t kDoubleAlignmentMask
Definition: globals.h:235
const uint32_t kIsIndirectStringMask
Definition: objects.h:568
const FPURegister f0
PerThreadAssertScopeDebugOnly< DEFERRED_HANDLE_DEREFERENCE_ASSERT, true > AllowDeferredHandleDereference
Definition: assert-scope.h:130
const int kNumRegisters
Definition: constants-arm.h:34
const int kCArgSlotCount
Debugger support for the V8 JavaScript engine.
Definition: accessors.cc:20
@ None
Definition: v8.h:2211
static Handle< Value > Throw(Isolate *isolate, const char *message)
Definition: d8.cc:72
@ NONE
bool is(Register reg) const