code-stubs-mips.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/v8.h"
6 
7 #if V8_TARGET_ARCH_MIPS
8 
9 #include "src/base/bits.h"
10 #include "src/bootstrapper.h"
11 #include "src/code-stubs.h"
12 #include "src/codegen.h"
13 #include "src/ic/handler-compiler.h"
14 #include "src/ic/ic.h"
15 #include "src/isolate.h"
16 #include "src/jsregexp.h"
17 #include "src/regexp-macro-assembler.h"
18 #include "src/runtime/runtime.h"
19 
20 namespace v8 {
21 namespace internal {
22 
23 
24 static void InitializeArrayConstructorDescriptor(
25  Isolate* isolate, CodeStubDescriptor* descriptor,
26  int constant_stack_parameter_count) {
27  Address deopt_handler = Runtime::FunctionForId(
28  Runtime::kArrayConstructor)->entry;
29 
30  if (constant_stack_parameter_count == 0) {
31  descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
32  JS_FUNCTION_STUB_MODE);
33  } else {
34  descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
35  JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
36  }
37 }
38 
39 
40 static void InitializeInternalArrayConstructorDescriptor(
41  Isolate* isolate, CodeStubDescriptor* descriptor,
42  int constant_stack_parameter_count) {
43  Address deopt_handler = Runtime::FunctionForId(
44  Runtime::kInternalArrayConstructor)->entry;
45 
46  if (constant_stack_parameter_count == 0) {
47  descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
48  JS_FUNCTION_STUB_MODE);
49  } else {
50  descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
51  JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
52  }
53 }
54 
55 
56 void ArrayNoArgumentConstructorStub::InitializeDescriptor(
57  CodeStubDescriptor* descriptor) {
58  InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
59 }
60 
61 
62 void ArraySingleArgumentConstructorStub::InitializeDescriptor(
63  CodeStubDescriptor* descriptor) {
64  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
65 }
66 
67 
68 void ArrayNArgumentsConstructorStub::InitializeDescriptor(
69  CodeStubDescriptor* descriptor) {
70  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
71 }
72 
73 
74 void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
75  CodeStubDescriptor* descriptor) {
76  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
77 }
78 
79 
80 void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
81  CodeStubDescriptor* descriptor) {
82  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
83 }
84 
85 
86 void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
87  CodeStubDescriptor* descriptor) {
88  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
89 }
90 
91 
92 #define __ ACCESS_MASM(masm)
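// Shorthand used throughout this file: ACCESS_MASM(masm) expands to masm->,
// so every '__ Foo(...)' line below emits one instruction (or pseudo
// instruction) through the MacroAssembler.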
93 
94 
95 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
96  Label* slow,
97  Condition cc);
98 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
99  Register lhs,
100  Register rhs,
101  Label* rhs_not_nan,
102  Label* slow,
103  bool strict);
104 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
105  Register lhs,
106  Register rhs);
107 
108 
109 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
110  ExternalReference miss) {
111  // Update the static counter each time a new code stub is generated.
112  isolate()->counters()->code_stubs()->Increment();
113 
114  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
115  int param_count = descriptor.GetEnvironmentParameterCount();
116  {
117  // Call the runtime system in a fresh internal frame.
118  FrameScope scope(masm, StackFrame::INTERNAL);
119  DCHECK(param_count == 0 ||
120  a0.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
121  // Push arguments, adjust sp.
122  __ Subu(sp, sp, Operand(param_count * kPointerSize));
123  for (int i = 0; i < param_count; ++i) {
124  // Store argument to stack.
125  __ sw(descriptor.GetEnvironmentParameterRegister(i),
126  MemOperand(sp, (param_count - 1 - i) * kPointerSize));
127  }
128  __ CallExternalReference(miss, param_count);
129  }
130 
131  __ Ret();
132 }
133 
134 
135 void DoubleToIStub::Generate(MacroAssembler* masm) {
136  Label out_of_range, only_low, negate, done;
137  Register input_reg = source();
138  Register result_reg = destination();
139 
140  int double_offset = offset();
141  // Account for saved regs if input is sp.
142  if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
143 
144  Register scratch =
145  GetRegisterThatIsNotOneOf(input_reg, result_reg);
146  Register scratch2 =
147  GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
148  Register scratch3 =
149  GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
150  DoubleRegister double_scratch = kLithiumScratchDouble;
151 
152  __ Push(scratch, scratch2, scratch3);
153 
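  // Fast path: let the FPU truncate, then accept the result only if FCSR
  // reports no invalid-operation, overflow or underflow exception; otherwise
  // fall through to the manual truncation below.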
154  if (!skip_fastpath()) {
155  // Load double input.
156  __ ldc1(double_scratch, MemOperand(input_reg, double_offset));
157 
158  // Clear cumulative exception flags and save the FCSR.
159  __ cfc1(scratch2, FCSR);
160  __ ctc1(zero_reg, FCSR);
161 
162  // Try a conversion to a signed integer.
163  __ Trunc_w_d(double_scratch, double_scratch);
164  // Move the converted value into the result register.
165  __ mfc1(scratch3, double_scratch);
166 
167  // Retrieve and restore the FCSR.
168  __ cfc1(scratch, FCSR);
169  __ ctc1(scratch2, FCSR);
170 
171  // Check for overflow and NaNs.
172  __ And(
173  scratch, scratch,
174  kFCSROverflowFlagMask | kFCSRUnderflowFlagMask |
175  kFCSRInvalidOpFlagMask);
176  // If we had no exceptions then set result_reg and we are done.
177  Label error;
178  __ Branch(&error, ne, scratch, Operand(zero_reg));
179  __ Move(result_reg, scratch3);
180  __ Branch(&done);
181  __ bind(&error);
182  }
183 
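  // Manual truncation, in outline: for a double with biased exponent E and
  // 52-bit mantissa M (plus the implicit leading 1), the value is roughly
  // +/-1.M * 2^(E-1023). The code below keeps only the low 32 bits of the
  // integer part by shifting the two 32-bit mantissa words into place and
  // then re-applying the sign.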
184  // Load the double value and perform a manual truncation.
185  Register input_high = scratch2;
186  Register input_low = scratch3;
187 
188  __ lw(input_low,
189  MemOperand(input_reg, double_offset + Register::kMantissaOffset));
190  __ lw(input_high,
191  MemOperand(input_reg, double_offset + Register::kExponentOffset));
192 
193  Label normal_exponent, restore_sign;
194  // Extract the biased exponent in result.
195  __ Ext(result_reg,
196  input_high,
197  HeapNumber::kExponentShift,
198  HeapNumber::kExponentBits);
199 
200  // Check for Infinity and NaNs, which should return 0.
201  __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
202  __ Movz(result_reg, zero_reg, scratch);
203  __ Branch(&done, eq, scratch, Operand(zero_reg));
204 
205  // Express exponent as delta to (number of mantissa bits + 31).
206  __ Subu(result_reg,
207  result_reg,
208  Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
209 
210  // If the delta is strictly positive, all bits would be shifted away,
211  // which means that we can return 0.
212  __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
213  __ mov(result_reg, zero_reg);
214  __ Branch(&done);
215 
216  __ bind(&normal_exponent);
217  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
218  // Calculate shift.
219  __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
220 
221  // Save the sign.
222  Register sign = result_reg;
223  result_reg = no_reg;
224  __ And(sign, input_high, Operand(HeapNumber::kSignMask));
225 
226  // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
227  // to check for this specific case.
228  Label high_shift_needed, high_shift_done;
229  __ Branch(&high_shift_needed, lt, scratch, Operand(32));
230  __ mov(input_high, zero_reg);
231  __ Branch(&high_shift_done);
232  __ bind(&high_shift_needed);
233 
234  // Set the implicit 1 before the mantissa part in input_high.
235  __ Or(input_high,
236  input_high,
237  Operand(1 << HeapNumber::kMantissaBitsInTopWord));
238  // Shift the mantissa bits to the correct position.
239  // We don't need to clear non-mantissa bits as they will be shifted away.
240  // If they weren't, it would mean that the answer is in the 32bit range.
241  __ sllv(input_high, input_high, scratch);
242 
243  __ bind(&high_shift_done);
244 
245  // Replace the shifted bits with bits from the lower mantissa word.
246  Label pos_shift, shift_done;
247  __ li(at, 32);
248  __ subu(scratch, at, scratch);
249  __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
250 
251  // Negate scratch.
252  __ Subu(scratch, zero_reg, scratch);
253  __ sllv(input_low, input_low, scratch);
254  __ Branch(&shift_done);
255 
256  __ bind(&pos_shift);
257  __ srlv(input_low, input_low, scratch);
258 
259  __ bind(&shift_done);
260  __ Or(input_high, input_high, Operand(input_low));
261  // Restore sign if necessary.
262  __ mov(scratch, sign);
263  result_reg = sign;
264  sign = no_reg;
265  __ Subu(result_reg, zero_reg, input_high);
266  __ Movz(result_reg, input_high, scratch);
267 
268  __ bind(&done);
269 
270  __ Pop(scratch, scratch2, scratch3);
271  __ Ret();
272 }
273 
274 
275 void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
276  Isolate* isolate) {
277  WriteInt32ToHeapNumberStub stub1(isolate, a1, v0, a2, a3);
278  WriteInt32ToHeapNumberStub stub2(isolate, a2, v0, a3, a0);
279  stub1.GetCode();
280  stub2.GetCode();
281 }
282 
283 
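// Reminder on the HeapNumber layout used below: the IEEE-754 double is stored
// as two 32-bit words, one holding the sign, exponent and top mantissa bits
// (kExponentOffset) and one holding the low mantissa bits (kMantissaOffset).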
284 // See comment for class, this does NOT work for int32's that are in Smi range.
285 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
286  Label max_negative_int;
287  // the_int_ has the answer which is a signed int32 but not a Smi.
288  // We test for the special value that has a different exponent.
289  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
290  // Test sign, and save for later conditionals.
291  __ And(sign(), the_int(), Operand(0x80000000u));
292  __ Branch(&max_negative_int, eq, the_int(), Operand(0x80000000u));
293 
294  // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
295  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
296  uint32_t non_smi_exponent =
297  (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
298  __ li(scratch(), Operand(non_smi_exponent));
299  // Set the sign bit in scratch_ if the value was negative.
300  __ or_(scratch(), scratch(), sign());
301  // Subtract from 0 if the value was negative.
302  __ subu(at, zero_reg, the_int());
303  __ Movn(the_int(), at, sign());
304  // We should be masking the implicit first digit of the mantissa away here,
305  // but it just ends up combining harmlessly with the last digit of the
306  // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
307  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
308  DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
309  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
310  __ srl(at, the_int(), shift_distance);
311  __ or_(scratch(), scratch(), at);
312  __ sw(scratch(), FieldMemOperand(the_heap_number(),
313  HeapNumber::kExponentOffset));
314  __ sll(scratch(), the_int(), 32 - shift_distance);
315  __ Ret(USE_DELAY_SLOT);
316  __ sw(scratch(), FieldMemOperand(the_heap_number(),
317  HeapNumber::kMantissaOffset));
318 
319  __ bind(&max_negative_int);
320  // The max negative int32 is stored as a positive number in the mantissa of
321  // a double because it uses a sign bit instead of using two's complement.
322  // The actual mantissa bits stored are all 0 because the implicit most
323  // significant 1 bit is not stored.
324  non_smi_exponent += 1 << HeapNumber::kExponentShift;
325  __ li(scratch(), Operand(HeapNumber::kSignMask | non_smi_exponent));
326  __ sw(scratch(),
327  FieldMemOperand(the_heap_number(), HeapNumber::kExponentOffset));
328  __ mov(scratch(), zero_reg);
329  __ Ret(USE_DELAY_SLOT);
330  __ sw(scratch(),
331  FieldMemOperand(the_heap_number(), HeapNumber::kMantissaOffset));
332 }
333 
334 
335 // Handle the case where the lhs and rhs are the same object.
336 // Equality is almost reflexive (everything but NaN), so this is a test
337 // for "identity and not NaN".
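// For example, if x is NaN then x == x must evaluate to false, so identical
// heap-number operands still have to go through the NaN test below.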
338 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
339  Label* slow,
340  Condition cc) {
341  Label not_identical;
342  Label heap_number, return_equal;
343  Register exp_mask_reg = t5;
344 
345  __ Branch(&not_identical, ne, a0, Operand(a1));
346 
347  __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
348 
349  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
350  // so we do the second best thing - test it ourselves.
351  // They are both equal and they are not both Smis so both of them are not
352  // Smis. If it's not a heap number, then return equal.
353  if (cc == less || cc == greater) {
354  __ GetObjectType(a0, t4, t4);
355  __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
356  } else {
357  __ GetObjectType(a0, t4, t4);
358  __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
359  // Comparing JS objects with <=, >= is complicated.
360  if (cc != eq) {
361  __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
362  // Normally here we fall through to return_equal, but undefined is
363  // special: (undefined == undefined) == true, but
364  // (undefined <= undefined) == false! See ECMAScript 11.8.5.
365  if (cc == less_equal || cc == greater_equal) {
366  __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
367  __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
368  __ Branch(&return_equal, ne, a0, Operand(t2));
369  DCHECK(is_int16(GREATER) && is_int16(LESS));
370  __ Ret(USE_DELAY_SLOT);
371  if (cc == le) {
372  // undefined <= undefined should fail.
373  __ li(v0, Operand(GREATER));
374  } else {
375  // undefined >= undefined should fail.
376  __ li(v0, Operand(LESS));
377  }
378  }
379  }
380  }
381 
382  __ bind(&return_equal);
383  DCHECK(is_int16(GREATER) && is_int16(LESS));
384  __ Ret(USE_DELAY_SLOT);
385  if (cc == less) {
386  __ li(v0, Operand(GREATER)); // Things aren't less than themselves.
387  } else if (cc == greater) {
388  __ li(v0, Operand(LESS)); // Things aren't greater than themselves.
389  } else {
390  __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves.
391  }
392 
393  // For less and greater we don't have to check for NaN since the result of
394  // x < x is false regardless. For the others here is some code to check
395  // for NaN.
396  if (cc != lt && cc != gt) {
397  __ bind(&heap_number);
398  // It is a heap number, so return non-equal if it's NaN and equal if it's
399  // not NaN.
400 
401  // The representation of NaN values has all exponent bits (52..62) set,
402  // and not all mantissa bits (0..51) clear.
403  // Read top bits of double representation (second word of value).
404  __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
405  // Test that exponent bits are all set.
406  __ And(t3, t2, Operand(exp_mask_reg));
407  // If all bits not set (ne cond), then not a NaN, objects are equal.
408  __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
409 
410  // Shift out flag and all exponent bits, retaining only mantissa.
411  __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
412  // Or with all low-bits of mantissa.
413  __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
414  __ Or(v0, t3, Operand(t2));
415  // For equal we already have the right value in v0: Return zero (equal)
416  // if all bits in mantissa are zero (it's an Infinity) and non-zero if
417  // not (it's a NaN). For <= and >= we need to load v0 with the failing
418  // value if it's a NaN.
419  if (cc != eq) {
420  // All-zero means Infinity means equal.
421  __ Ret(eq, v0, Operand(zero_reg));
422  DCHECK(is_int16(GREATER) && is_int16(LESS));
423  __ Ret(USE_DELAY_SLOT);
424  if (cc == le) {
425  __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
426  } else {
427  __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
428  }
429  }
430  }
431  // No fall through here.
432 
433  __ bind(&not_identical);
434 }
435 
436 
437 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
438  Register lhs,
439  Register rhs,
440  Label* both_loaded_as_doubles,
441  Label* slow,
442  bool strict) {
443  DCHECK((lhs.is(a0) && rhs.is(a1)) ||
444  (lhs.is(a1) && rhs.is(a0)));
445 
446  Label lhs_is_smi;
447  __ JumpIfSmi(lhs, &lhs_is_smi);
448  // Rhs is a Smi.
449  // Check whether the non-smi is a heap number.
450  __ GetObjectType(lhs, t4, t4);
451  if (strict) {
452  // If lhs was not a number and rhs was a Smi then strict equality cannot
453  // succeed. Return non-equal (lhs is already not zero).
454  __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
455  __ mov(v0, lhs);
456  } else {
457  // Smi compared non-strictly with a non-Smi non-heap-number. Call
458  // the runtime.
459  __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
460  }
461 
462  // Rhs is a smi, lhs is a number.
463  // Convert smi rhs to double.
464  __ sra(at, rhs, kSmiTagSize);
465  __ mtc1(at, f14);
466  __ cvt_d_w(f14, f14);
467  __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
468 
469  // We now have both loaded as doubles.
470  __ jmp(both_loaded_as_doubles);
471 
472  __ bind(&lhs_is_smi);
473  // Lhs is a Smi. Check whether the non-smi is a heap number.
474  __ GetObjectType(rhs, t4, t4);
475  if (strict) {
476  // If lhs was not a number and rhs was a Smi then strict equality cannot
477  // succeed. Return non-equal.
478  __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
479  __ li(v0, Operand(1));
480  } else {
481  // Smi compared non-strictly with a non-Smi non-heap-number. Call
482  // the runtime.
483  __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
484  }
485 
486  // Lhs is a smi, rhs is a number.
487  // Convert smi lhs to double.
488  __ sra(at, lhs, kSmiTagSize);
489  __ mtc1(at, f12);
490  __ cvt_d_w(f12, f12);
491  __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
492  // Fall through to both_loaded_as_doubles.
493 }
494 
495 
496 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
497  Register lhs,
498  Register rhs) {
499  // If either operand is a JS object or an oddball value, then they are
500  // not equal since their pointers are different.
501  // There is no test for undetectability in strict equality.
502  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
503  Label first_non_object;
504  // Get the type of the first operand into a2 and compare it with
505  // FIRST_SPEC_OBJECT_TYPE.
506  __ GetObjectType(lhs, a2, a2);
507  __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
508 
509  // Return non-zero.
510  Label return_not_equal;
511  __ bind(&return_not_equal);
512  __ Ret(USE_DELAY_SLOT);
513  __ li(v0, Operand(1));
514 
515  __ bind(&first_non_object);
516  // Check for oddballs: true, false, null, undefined.
517  __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
518 
519  __ GetObjectType(rhs, a3, a3);
520  __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
521 
522  // Check for oddballs: true, false, null, undefined.
523  __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
524 
525  // Now that we have the types we might as well check for
526  // internalized-internalized.
527  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
528  __ Or(a2, a2, Operand(a3));
529  __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
530  __ Branch(&return_not_equal, eq, at, Operand(zero_reg));
531 }
532 
533 
534 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
535  Register lhs,
536  Register rhs,
537  Label* both_loaded_as_doubles,
538  Label* not_heap_numbers,
539  Label* slow) {
540  __ GetObjectType(lhs, a3, a2);
541  __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
542  __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
543  // If first was a heap number & second wasn't, go to slow case.
544  __ Branch(slow, ne, a3, Operand(a2));
545 
546  // Both are heap numbers. Load them up then jump to the code we have
547  // for that.
548  __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
549  __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
550 
551  __ jmp(both_loaded_as_doubles);
552 }
553 
554 
555 // Fast negative check for internalized-to-internalized equality.
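// Internalized strings are deduplicated by content, so two internalized
// strings that are distinct objects can never be equal; the earlier pointer
// comparison is therefore sufficient for them.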
556 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
557  Register lhs,
558  Register rhs,
559  Label* possible_strings,
560  Label* not_both_strings) {
561  DCHECK((lhs.is(a0) && rhs.is(a1)) ||
562  (lhs.is(a1) && rhs.is(a0)));
563 
564  // a2 is object type of rhs.
565  Label object_test;
566  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
567  __ And(at, a2, Operand(kIsNotStringMask));
568  __ Branch(&object_test, ne, at, Operand(zero_reg));
569  __ And(at, a2, Operand(kIsNotInternalizedMask));
570  __ Branch(possible_strings, ne, at, Operand(zero_reg));
571  __ GetObjectType(rhs, a3, a3);
572  __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
573  __ And(at, a3, Operand(kIsNotInternalizedMask));
574  __ Branch(possible_strings, ne, at, Operand(zero_reg));
575 
576  // Both are internalized strings. We already checked they weren't the same
577  // pointer so they are not equal.
578  __ Ret(USE_DELAY_SLOT);
579  __ li(v0, Operand(1)); // Non-zero indicates not equal.
580 
581  __ bind(&object_test);
582  __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
583  __ GetObjectType(rhs, a2, a3);
584  __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
585 
586  // If both objects are undetectable, they are equal. Otherwise, they
587  // are not equal, since they are different objects and an object is not
588  // equal to undefined.
589  __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
590  __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
591  __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
592  __ and_(a0, a2, a3);
593  __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
594  __ Ret(USE_DELAY_SLOT);
595  __ xori(v0, a0, 1 << Map::kIsUndetectable);
596 }
597 
598 
599 static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
600  Register scratch,
601  CompareICState::State expected,
602  Label* fail) {
603  Label ok;
604  if (expected == CompareICState::SMI) {
605  __ JumpIfNotSmi(input, fail);
606  } else if (expected == CompareICState::NUMBER) {
607  __ JumpIfSmi(input, &ok);
608  __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
609  DONT_DO_SMI_CHECK);
610  }
611  // We could be strict about internalized/string here, but as long as
612  // hydrogen doesn't care, the stub doesn't have to care either.
613  __ bind(&ok);
614 }
615 
616 
617 // On entry a1 and a2 are the values to be compared.
618 // On exit a0 is 0, positive or negative to indicate the result of
619 // the comparison.
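// Overall flow of the generic comparison: smi-smi fast case, identical-object
// check, smi/heap-number and number-number comparison in FPU registers,
// internalized-string and flat one-byte string fast cases, and finally a call
// to the EQUALS/STRICT_EQUALS/COMPARE builtins for everything else.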
620 void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
621  Register lhs = a1;
622  Register rhs = a0;
623  Condition cc = GetCondition();
624 
625  Label miss;
626  CompareICStub_CheckInputType(masm, lhs, a2, left(), &miss);
627  CompareICStub_CheckInputType(masm, rhs, a3, right(), &miss);
628 
629  Label slow; // Call builtin.
630  Label not_smis, both_loaded_as_doubles;
631 
632  Label not_two_smis, smi_done;
633  __ Or(a2, a1, a0);
634  __ JumpIfNotSmi(a2, &not_two_smis);
635  __ sra(a1, a1, 1);
636  __ sra(a0, a0, 1);
637  __ Ret(USE_DELAY_SLOT);
638  __ subu(v0, a1, a0);
639  __ bind(&not_two_smis);
640 
641  // NOTICE! This code is only reached after a smi-fast-case check, so
642  // it is certain that at least one operand isn't a smi.
643 
644  // Handle the case where the objects are identical. Either returns the answer
645  // or goes to slow. Only falls through if the objects were not identical.
646  EmitIdenticalObjectComparison(masm, &slow, cc);
647 
648  // If either is a Smi (we know that not both are), then they can only
649  // be strictly equal if the other is a HeapNumber.
650  STATIC_ASSERT(kSmiTag == 0);
651  DCHECK_EQ(0, Smi::FromInt(0));
652  __ And(t2, lhs, Operand(rhs));
653  __ JumpIfNotSmi(t2, &not_smis, t0);
654  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
655  // 1) Return the answer.
656  // 2) Go to slow.
657  // 3) Fall through to both_loaded_as_doubles.
658  // 4) Jump to rhs_not_nan.
659  // In cases 3 and 4 we have found out we were dealing with a number-number
660  // comparison and the numbers have been loaded into f12 and f14 as doubles,
661  // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
662  EmitSmiNonsmiComparison(masm, lhs, rhs,
663  &both_loaded_as_doubles, &slow, strict());
664 
665  __ bind(&both_loaded_as_doubles);
666  // f12, f14 are the double representations of the left hand side
667  // and the right hand side if we have FPU. Otherwise a2, a3 represent
668  // left hand side and a0, a1 represent right hand side.
669  Label nan;
670  __ li(t0, Operand(LESS));
671  __ li(t1, Operand(GREATER));
672  __ li(t2, Operand(EQUAL));
673 
674  // Check if either rhs or lhs is NaN.
675  __ BranchF(NULL, &nan, eq, f12, f14);
676 
677  // Check if LESS condition is satisfied. If true, move conditionally
678  // result to v0.
679  if (!IsMipsArchVariant(kMips32r6)) {
680  __ c(OLT, D, f12, f14);
681  __ Movt(v0, t0);
682  // Use previous check to store conditionally to v0 the opposite condition
683  // (GREATER). If rhs is equal to lhs, this will be corrected in next
684  // check.
685  __ Movf(v0, t1);
686  // Check if EQUAL condition is satisfied. If true, move conditionally
687  // result to v0.
688  __ c(EQ, D, f12, f14);
689  __ Movt(v0, t2);
690  } else {
691  Label skip;
692  __ BranchF(USE_DELAY_SLOT, &skip, NULL, lt, f12, f14);
693  __ mov(v0, t0); // Return LESS as result.
694 
695  __ BranchF(USE_DELAY_SLOT, &skip, NULL, eq, f12, f14);
696  __ mov(v0, t2); // Return EQUAL as result.
697 
698  __ mov(v0, t1); // Return GREATER as result.
699  __ bind(&skip);
700  }
701 
702  __ Ret();
703 
704  __ bind(&nan);
705  // NaN comparisons always fail.
706  // Load whatever we need in v0 to make the comparison fail.
707  DCHECK(is_int16(GREATER) && is_int16(LESS));
708  __ Ret(USE_DELAY_SLOT);
709  if (cc == lt || cc == le) {
710  __ li(v0, Operand(GREATER));
711  } else {
712  __ li(v0, Operand(LESS));
713  }
714 
715 
716  __ bind(&not_smis);
717  // At this point we know we are dealing with two different objects,
718  // and neither of them is a Smi. The objects are in lhs_ and rhs_.
719  if (strict()) {
720  // This returns non-equal for some object types, or falls through if it
721  // was not lucky.
722  EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
723  }
724 
725  Label check_for_internalized_strings;
726  Label flat_string_check;
727  // Check for heap-number-heap-number comparison. Can jump to slow case,
728  // or load both doubles and jump to the code that handles
729  // that case. If the inputs are not doubles then jumps to
730  // check_for_internalized_strings.
731  // In this case a2 will contain the type of lhs_.
732  EmitCheckForTwoHeapNumbers(masm,
733  lhs,
734  rhs,
735  &both_loaded_as_doubles,
736  &check_for_internalized_strings,
737  &flat_string_check);
738 
739  __ bind(&check_for_internalized_strings);
740  if (cc == eq && !strict()) {
741  // Returns an answer for two internalized strings or two
742  // detectable objects.
743  // Otherwise jumps to string case or not both strings case.
744  // Assumes that a2 is the type of lhs_ on entry.
745  EmitCheckForInternalizedStringsOrObjects(
746  masm, lhs, rhs, &flat_string_check, &slow);
747  }
748 
749  // Check for both being sequential one-byte strings,
750  // and inline if that is the case.
751  __ bind(&flat_string_check);
752 
753  __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, a2, a3, &slow);
754 
755  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
756  a3);
757  if (cc == eq) {
758  StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, a2, a3, t0);
759  } else {
760  StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, a2, a3, t0,
761  t1);
762  }
763  // Never falls through to here.
764 
765  __ bind(&slow);
766  // Prepare for call to builtin. Push object pointers, a1 (lhs) first,
767  // a0 (rhs) second.
768  __ Push(lhs, rhs);
769  // Figure out which native to call and setup the arguments.
770  Builtins::JavaScript native;
771  if (cc == eq) {
772  native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
773  } else {
774  native = Builtins::COMPARE;
775  int ncr; // NaN compare result.
776  if (cc == lt || cc == le) {
777  ncr = GREATER;
778  } else {
779  DCHECK(cc == gt || cc == ge); // Remaining cases.
780  ncr = LESS;
781  }
782  __ li(a0, Operand(Smi::FromInt(ncr)));
783  __ push(a0);
784  }
785 
786  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
787  // tagged as a small integer.
788  __ InvokeBuiltin(native, JUMP_FUNCTION);
789 
790  __ bind(&miss);
791  GenerateMiss(masm);
792 }
793 
794 
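// The two stubs below juggle ra so that the safepoint register block can be
// pushed or popped: the stub's own return address is parked in t9, ra is
// reloaded from the stack, the safepoint registers are pushed or popped, and
// control returns through t9.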
795 void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
796  __ mov(t9, ra);
797  __ pop(ra);
798  __ PushSafepointRegisters();
799  __ Jump(t9);
800 }
801 
802 
803 void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
804  __ mov(t9, ra);
805  __ pop(ra);
806  __ PopSafepointRegisters();
807  __ Jump(t9);
808 }
809 
810 
811 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
812  // We don't allow a GC during a store buffer overflow so there is no need to
813  // store the registers in any particular way, but we do have to store and
814  // restore them.
815  __ MultiPush(kJSCallerSaved | ra.bit());
816  if (save_doubles()) {
817  __ MultiPushFPU(kCallerSavedFPU);
818  }
819  const int argument_count = 1;
820  const int fp_argument_count = 0;
821  const Register scratch = a1;
822 
823  AllowExternalCallThatCantCauseGC scope(masm);
824  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
825  __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
826  __ CallCFunction(
827  ExternalReference::store_buffer_overflow_function(isolate()),
828  argument_count);
829  if (save_doubles()) {
830  __ MultiPopFPU(kCallerSavedFPU);
831  }
832 
833  __ MultiPop(kJSCallerSaved | ra.bit());
834  __ Ret();
835 }
836 
837 
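// MathPowStub in outline: for ON_STACK calls, exponents of exactly +/-0.5 are
// handled with sqrt_d (including the Math.pow(-Infinity, +/-0.5) special
// cases); other non-integer exponents are passed to the C function
// power_double_double; integer exponents use the binary exponentiation loop
// further down.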
838 void MathPowStub::Generate(MacroAssembler* masm) {
839  const Register base = a1;
840  const Register exponent = MathPowTaggedDescriptor::exponent();
841  DCHECK(exponent.is(a2));
842  const Register heapnumbermap = t1;
843  const Register heapnumber = v0;
844  const DoubleRegister double_base = f2;
845  const DoubleRegister double_exponent = f4;
846  const DoubleRegister double_result = f0;
847  const DoubleRegister double_scratch = f6;
848  const FPURegister single_scratch = f8;
849  const Register scratch = t5;
850  const Register scratch2 = t3;
851 
852  Label call_runtime, done, int_exponent;
853  if (exponent_type() == ON_STACK) {
854  Label base_is_smi, unpack_exponent;
855  // The exponent and base are supplied as arguments on the stack.
856  // This can only happen if the stub is called from non-optimized code.
857  // Load input parameters from stack to double registers.
858  __ lw(base, MemOperand(sp, 1 * kPointerSize));
859  __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
860 
861  __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
862 
863  __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
864  __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
865  __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
866 
867  __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
868  __ jmp(&unpack_exponent);
869 
870  __ bind(&base_is_smi);
871  __ mtc1(scratch, single_scratch);
872  __ cvt_d_w(double_base, single_scratch);
873  __ bind(&unpack_exponent);
874 
875  __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
876 
877  __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
878  __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
879  __ ldc1(double_exponent,
880  FieldMemOperand(exponent, HeapNumber::kValueOffset));
881  } else if (exponent_type() == TAGGED) {
882  // Base is already in double_base.
883  __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
884 
885  __ ldc1(double_exponent,
886  FieldMemOperand(exponent, HeapNumber::kValueOffset));
887  }
888 
889  if (exponent_type() != INTEGER) {
890  Label int_exponent_convert;
891  // Detect integer exponents stored as double.
892  __ EmitFPUTruncate(kRoundToMinusInf,
893  scratch,
894  double_exponent,
895  at,
896  double_scratch,
897  scratch2,
898  kCheckForInexactConversion);
899  // scratch2 == 0 means there was no conversion error.
900  __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
901 
902  if (exponent_type() == ON_STACK) {
903  // Detect square root case. Crankshaft detects constant +/-0.5 at
904  // compile time and uses DoMathPowHalf instead. We then skip this check
905  // for non-constant cases of +/-0.5 as these hardly occur.
906  Label not_plus_half;
907 
908  // Test for 0.5.
909  __ Move(double_scratch, 0.5);
910  __ BranchF(USE_DELAY_SLOT,
911  &not_plus_half,
912  NULL,
913  ne,
914  double_exponent,
915  double_scratch);
916  // double_scratch can be overwritten in the delay slot.
917  // Calculates square root of base. Check for the special case of
918  // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
919  __ Move(double_scratch, -V8_INFINITY);
920  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
921  __ neg_d(double_result, double_scratch);
922 
923  // Add +0 to convert -0 to +0.
924  __ add_d(double_scratch, double_base, kDoubleRegZero);
925  __ sqrt_d(double_result, double_scratch);
926  __ jmp(&done);
927 
928  __ bind(&not_plus_half);
929  __ Move(double_scratch, -0.5);
930  __ BranchF(USE_DELAY_SLOT,
931  &call_runtime,
932  NULL,
933  ne,
934  double_exponent,
935  double_scratch);
936  // double_scratch can be overwritten in the delay slot.
937  // Calculates square root of base. Check for the special case of
938  // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
939  __ Move(double_scratch, -V8_INFINITY);
940  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
941  __ Move(double_result, kDoubleRegZero);
942 
943  // Add +0 to convert -0 to +0.
944  __ add_d(double_scratch, double_base, kDoubleRegZero);
945  __ Move(double_result, 1);
946  __ sqrt_d(double_scratch, double_scratch);
947  __ div_d(double_result, double_result, double_scratch);
948  __ jmp(&done);
949  }
950 
951  __ push(ra);
952  {
953  AllowExternalCallThatCantCauseGC scope(masm);
954  __ PrepareCallCFunction(0, 2, scratch2);
955  __ MovToFloatParameters(double_base, double_exponent);
956  __ CallCFunction(
957  ExternalReference::power_double_double_function(isolate()),
958  0, 2);
959  }
960  __ pop(ra);
961  __ MovFromFloatResult(double_result);
962  __ jmp(&done);
963 
964  __ bind(&int_exponent_convert);
965  }
966 
967  // Calculate power with integer exponent.
968  __ bind(&int_exponent);
969 
970  // Get two copies of exponent in the registers scratch and exponent.
971  if (exponent_type() == INTEGER) {
972  __ mov(scratch, exponent);
973  } else {
974  // Exponent has previously been stored into scratch as untagged integer.
975  __ mov(exponent, scratch);
976  }
977 
978  __ mov_d(double_scratch, double_base); // Back up base.
979  __ Move(double_result, 1.0);
980 
981  // Get absolute value of exponent.
982  Label positive_exponent;
983  __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
984  __ Subu(scratch, zero_reg, scratch);
985  __ bind(&positive_exponent);
986 
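  // Exponentiation by squaring: walk the exponent bit by bit, multiplying the
  // result by the current power of the base whenever the bit is set. E.g. an
  // exponent of 5 (binary 101) yields base^1 * base^4.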
987  Label while_true, no_carry, loop_end;
988  __ bind(&while_true);
989 
990  __ And(scratch2, scratch, 1);
991 
992  __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
993  __ mul_d(double_result, double_result, double_scratch);
994  __ bind(&no_carry);
995 
996  __ sra(scratch, scratch, 1);
997 
998  __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
999  __ mul_d(double_scratch, double_scratch, double_scratch);
1000 
1001  __ Branch(&while_true);
1002 
1003  __ bind(&loop_end);
1004 
1005  __ Branch(&done, ge, exponent, Operand(zero_reg));
1006  __ Move(double_scratch, 1.0);
1007  __ div_d(double_result, double_scratch, double_result);
1008  // Test whether result is zero. Bail out to check for subnormal result.
1009  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
1010  __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
1011 
1012  // double_exponent may not contain the exponent value if the input was a
1013  // smi. We set it with exponent value before bailing out.
1014  __ mtc1(exponent, single_scratch);
1015  __ cvt_d_w(double_exponent, single_scratch);
1016 
1017  // Returning or bailing out.
1018  Counters* counters = isolate()->counters();
1019  if (exponent_type() == ON_STACK) {
1020  // The arguments are still on the stack.
1021  __ bind(&call_runtime);
1022  __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
1023 
1024  // The stub is called from non-optimized code, which expects the result
1025  // as heap number in exponent.
1026  __ bind(&done);
1027  __ AllocateHeapNumber(
1028  heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
1029  __ sdc1(double_result,
1030  FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
1031  DCHECK(heapnumber.is(v0));
1032  __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
1033  __ DropAndRet(2);
1034  } else {
1035  __ push(ra);
1036  {
1037  AllowExternalCallThatCantCauseGC scope(masm);
1038  __ PrepareCallCFunction(0, 2, scratch);
1039  __ MovToFloatParameters(double_base, double_exponent);
1040  __ CallCFunction(
1041  ExternalReference::power_double_double_function(isolate()),
1042  0, 2);
1043  }
1044  __ pop(ra);
1045  __ MovFromFloatResult(double_result);
1046 
1047  __ bind(&done);
1048  __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
1049  __ Ret();
1050  }
1051 }
1052 
1053 
1054 bool CEntryStub::NeedsImmovableCode() {
1055  return true;
1056 }
1057 
1058 
1059 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
1069  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
1070 }
1071 
1072 
1073 void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
1074  StoreRegistersStateStub stub(isolate);
1075  stub.GetCode();
1076 }
1077 
1078 
1079 void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
1080  RestoreRegistersStateStub stub(isolate);
1081  stub.GetCode();
1082 }
1083 
1084 
1085 void CodeStub::GenerateFPStubs(Isolate* isolate) {
1086  // Generate if not already in cache.
1087  SaveFPRegsMode mode = kSaveFPRegs;
1088  CEntryStub(isolate, 1, mode).GetCode();
1089  StoreBufferOverflowStub(isolate, mode).GetCode();
1090  isolate->set_fp_stubs_generated(true);
1091 }
1092 
1093 
1094 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
1095  CEntryStub stub(isolate, 1, kDontSaveFPRegs);
1096  stub.GetCode();
1097 }
1098 
1099 
1100 void CEntryStub::Generate(MacroAssembler* masm) {
1101  // Called from JavaScript; parameters are on stack as if calling JS function
1102  // a0: number of arguments including receiver
1103  // a1: pointer to builtin function
1104  // fp: frame pointer (restored after C call)
1105  // sp: stack pointer (restored as callee's sp after C call)
1106  // cp: current context (C callee-saved)
1107 
1108  ProfileEntryHookStub::MaybeCallEntryHook(masm);
1109 
1110  // Compute the argv pointer in a callee-saved register.
1111  __ sll(s1, a0, kPointerSizeLog2);
1112  __ Addu(s1, sp, s1);
1113  __ Subu(s1, s1, kPointerSize);
1114 
1115  // Enter the exit frame that transitions from JavaScript to C++.
1116  FrameScope scope(masm, StackFrame::MANUAL);
1117  __ EnterExitFrame(save_doubles());
1118 
1119  // s0: number of arguments including receiver (C callee-saved)
1120  // s1: pointer to first argument (C callee-saved)
1121  // s2: pointer to builtin function (C callee-saved)
1122 
1123  // Prepare arguments for C routine.
1124  // a0 = argc
1125  __ mov(s0, a0);
1126  __ mov(s2, a1);
1127  // a1 = argv (set in the delay slot after find_ra below).
1128 
1129  // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
1130  // also need to reserve the 4 argument slots on the stack.
1131 
1132  __ AssertStackIsAligned();
1133 
1134  __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
1135 
1136  // To let the GC traverse the return address of the exit frames, we need to
1137  // know where the return address is. The CEntryStub is unmovable, so
1138  // we can store the address on the stack to be able to find it again and
1139  // we never have to restore it, because it will not change.
1140  { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
1141  // This branch-and-link sequence is needed to find the current PC on mips,
1142  // saved to the ra register.
1143  // Use masm-> here instead of the double-underscore macro since extra
1144  // coverage code can interfere with the proper calculation of ra.
1145  Label find_ra;
1146  masm->bal(&find_ra); // bal exposes branch delay slot.
1147  masm->mov(a1, s1);
1148  masm->bind(&find_ra);
1149 
1150  // Adjust the value in ra to point to the correct return location, 2nd
1151  // instruction past the real call into C code (the jalr(t9)), and push it.
1152  // This is the return address of the exit frame.
1153  const int kNumInstructionsToJump = 5;
1154  masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
1155  masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
1156  // Stack space reservation moved to the branch delay slot below.
1157  // Stack is still aligned.
1158 
1159  // Call the C routine.
1160  masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
1161  masm->jalr(t9);
1162  // Set up sp in the delay slot.
1163  masm->addiu(sp, sp, -kCArgsSlotsSize);
1164  // Make sure the stored 'ra' points to this position.
1165  DCHECK_EQ(kNumInstructionsToJump,
1166  masm->InstructionsGeneratedSince(&find_ra));
1167  }
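  // Note on the offset above: the five instructions after find_ra are the
  // Addu, the sw, the mov to t9, the jalr and its delay-slot addiu, so the
  // stored ra points exactly at the instruction where the C call returns.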
1168 
1169 
1170  // Runtime functions should not return 'the hole'. Allowing it to escape may
1171  // lead to crashes in the IC code later.
1172  if (FLAG_debug_code) {
1173  Label okay;
1174  __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
1175  __ Branch(&okay, ne, v0, Operand(t0));
1176  __ stop("The hole escaped");
1177  __ bind(&okay);
1178  }
1179 
1180  // Check result for exception sentinel.
1181  Label exception_returned;
1182  __ LoadRoot(t0, Heap::kExceptionRootIndex);
1183  __ Branch(&exception_returned, eq, t0, Operand(v0));
1184 
1185  ExternalReference pending_exception_address(
1186  Isolate::kPendingExceptionAddress, isolate());
1187 
1188  // Check that there is no pending exception, otherwise we
1189  // should have returned the exception sentinel.
1190  if (FLAG_debug_code) {
1191  Label okay;
1192  __ li(a2, Operand(pending_exception_address));
1193  __ lw(a2, MemOperand(a2));
1194  __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
1195  // Cannot use check here as it attempts to generate call into runtime.
1196  __ Branch(&okay, eq, t0, Operand(a2));
1197  __ stop("Unexpected pending exception");
1198  __ bind(&okay);
1199  }
1200 
1201  // Exit C frame and return.
1202  // v0:v1: result
1203  // sp: stack pointer
1204  // fp: frame pointer
1205  // s0: still holds argc (callee-saved).
1206  __ LeaveExitFrame(save_doubles(), s0, true, EMIT_RETURN);
1207 
1208  // Handling of exception.
1209  __ bind(&exception_returned);
1210 
1211  // Retrieve the pending exception.
1212  __ li(a2, Operand(pending_exception_address));
1213  __ lw(v0, MemOperand(a2));
1214 
1215  // Clear the pending exception.
1216  __ li(a3, Operand(isolate()->factory()->the_hole_value()));
1217  __ sw(a3, MemOperand(a2));
1218 
1219  // Special handling of termination exceptions which are uncatchable
1220  // by javascript code.
1221  Label throw_termination_exception;
1222  __ LoadRoot(t0, Heap::kTerminationExceptionRootIndex);
1223  __ Branch(&throw_termination_exception, eq, v0, Operand(t0));
1224 
1225  // Handle normal exception.
1226  __ Throw(v0);
1227 
1228  __ bind(&throw_termination_exception);
1229  __ ThrowUncatchable(v0);
1230 }
1231 
1232 
1233 void JSEntryStub::Generate(MacroAssembler* masm) {
1234  Label invoke, handler_entry, exit;
1235  Isolate* isolate = masm->isolate();
1236 
1237  // Registers:
1238  // a0: entry address
1239  // a1: function
1240  // a2: receiver
1241  // a3: argc
1242  //
1243  // Stack:
1244  // 4 args slots
1245  // args
1246 
1247  ProfileEntryHookStub::MaybeCallEntryHook(masm);
1248 
1249  // Save callee saved registers on the stack.
1250  __ MultiPush(kCalleeSaved | ra.bit());
1251 
1252  // Save callee-saved FPU registers.
1253  __ MultiPushFPU(kCalleeSavedFPU);
1254  // Set up the reserved register for 0.0.
1255  __ Move(kDoubleRegZero, 0.0);
1256 
1257 
1258  // Load argv in s0 register.
1259  int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
1260  offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
1261 
1262  __ InitializeRootRegister();
1263  __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
1264 
1265  // We build an EntryFrame.
1266  __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
1267  int marker = type();
1268  __ li(t2, Operand(Smi::FromInt(marker)));
1269  __ li(t1, Operand(Smi::FromInt(marker)));
1270  __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
1271  isolate)));
1272  __ lw(t0, MemOperand(t0));
1273  __ Push(t3, t2, t1, t0);
1274  // Set up frame pointer for the frame to be pushed.
1275  __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
1276 
1277  // Registers:
1278  // a0: entry_address
1279  // a1: function
1280  // a2: receiver_pointer
1281  // a3: argc
1282  // s0: argv
1283  //
1284  // Stack:
1285  // caller fp |
1286  // function slot | entry frame
1287  // context slot |
1288  // bad fp (0xff...f) |
1289  // callee saved registers + ra
1290  // 4 args slots
1291  // args
1292 
1293  // If this is the outermost JS call, set js_entry_sp value.
1294  Label non_outermost_js;
1295  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
1296  __ li(t1, Operand(ExternalReference(js_entry_sp)));
1297  __ lw(t2, MemOperand(t1));
1298  __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
1299  __ sw(fp, MemOperand(t1));
1300  __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1301  Label cont;
1302  __ b(&cont);
1303  __ nop(); // Branch delay slot nop.
1304  __ bind(&non_outermost_js);
1305  __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
1306  __ bind(&cont);
1307  __ push(t0);
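  // This marker lets the exit path below tell whether this frame is the
  // outermost JS entry, which owns js_entry_sp and must clear it on the way
  // out.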
1308 
1309  // Jump to a faked try block that does the invoke, with a faked catch
1310  // block that sets the pending exception.
1311  __ jmp(&invoke);
1312  __ bind(&handler_entry);
1313  handler_offset_ = handler_entry.pos();
1314  // Caught exception: Store result (exception) in the pending exception
1315  // field in the JSEnv and return a failure sentinel. Coming in here the
1316  // fp will be invalid because the PushTryHandler below sets it to 0 to
1317  // signal the existence of the JSEntry frame.
1318  __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1319  isolate)));
1320  __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
1321  __ LoadRoot(v0, Heap::kExceptionRootIndex);
1322  __ b(&exit); // b exposes branch delay slot.
1323  __ nop(); // Branch delay slot nop.
1324 
1325  // Invoke: Link this frame into the handler chain. There's only one
1326  // handler block in this code object, so its index is 0.
1327  __ bind(&invoke);
1328  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
1329  // If an exception not caught by another handler occurs, this handler
1330  // returns control to the code after the bal(&invoke) above, which
1331  // restores all kCalleeSaved registers (including cp and fp) to their
1332  // saved values before returning a failure to C.
1333 
1334  // Clear any pending exceptions.
1335  __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
1336  __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1337  isolate)));
1338  __ sw(t1, MemOperand(t0));
1339 
1340  // Invoke the function by calling through JS entry trampoline builtin.
1341  // Notice that we cannot store a reference to the trampoline code directly in
1342  // this stub, because runtime stubs are not traversed when doing GC.
1343 
1344  // Registers:
1345  // a0: entry_address
1346  // a1: function
1347  // a2: receiver_pointer
1348  // a3: argc
1349  // s0: argv
1350  //
1351  // Stack:
1352  // handler frame
1353  // entry frame
1354  // callee saved registers + ra
1355  // 4 args slots
1356  // args
1357 
1358  if (type() == StackFrame::ENTRY_CONSTRUCT) {
1359  ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
1360  isolate);
1361  __ li(t0, Operand(construct_entry));
1362  } else {
1363  ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
1364  __ li(t0, Operand(entry));
1365  }
1366  __ lw(t9, MemOperand(t0)); // Deref address.
1367 
1368  // Call JSEntryTrampoline.
1369  __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
1370  __ Call(t9);
1371 
1372  // Unlink this frame from the handler chain.
1373  __ PopTryHandler();
1374 
1375  __ bind(&exit); // v0 holds result
1376  // Check if the current stack frame is marked as the outermost JS frame.
1377  Label non_outermost_js_2;
1378  __ pop(t1);
1379  __ Branch(&non_outermost_js_2,
1380  ne,
1381  t1,
1382  Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1383  __ li(t1, Operand(ExternalReference(js_entry_sp)));
1384  __ sw(zero_reg, MemOperand(t1));
1385  __ bind(&non_outermost_js_2);
1386 
1387  // Restore the top frame descriptors from the stack.
1388  __ pop(t1);
1389  __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
1390  isolate)));
1391  __ sw(t1, MemOperand(t0));
1392 
1393  // Reset the stack to the callee saved registers.
1394  __ addiu(sp, fp, -EntryFrameConstants::kCallerFPOffset);
1395 
1396  // Restore callee-saved fpu registers.
1397  __ MultiPopFPU(kCalleeSavedFPU);
1398 
1399  // Restore callee saved registers from the stack.
1400  __ MultiPop(kCalleeSaved | ra.bit());
1401  // Return.
1402  __ Jump(ra);
1403 }
1404 
1405 
1406 // Uses registers a0 to t0.
1407 // Expected input (depending on whether args are in registers or on the stack):
1408 // * object: a0 or at sp + 1 * kPointerSize.
1409 // * function: a1 or at sp.
1410 //
1411 // An inlined call site may have been generated before calling this stub.
1412 // In this case the offset to the inline site to patch is passed on the stack,
1413 // in the safepoint slot for register t0.
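// In outline, the stub walks the object's prototype chain until it either
// finds the function's prototype (instance) or reaches null (not an
// instance), and then records the answer in the instanceof cache roots or
// patches it back into the inlined call site.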
1414 void InstanceofStub::Generate(MacroAssembler* masm) {
1415  // Call site inlining and patching implies arguments in registers.
1416  DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
1417 
1418  // Fixed register usage throughout the stub:
1419  const Register object = a0; // Object (lhs).
1420  Register map = a3; // Map of the object.
1421  const Register function = a1; // Function (rhs).
1422  const Register prototype = t0; // Prototype of the function.
1423  const Register inline_site = t5;
1424  const Register scratch = a2;
1425 
1426  const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize;
1427 
1428  Label slow, loop, is_instance, is_not_instance, not_js_object;
1429 
1430  if (!HasArgsInRegisters()) {
1431  __ lw(object, MemOperand(sp, 1 * kPointerSize));
1432  __ lw(function, MemOperand(sp, 0));
1433  }
1434 
1435  // Check that the left hand is a JS object and load map.
1436  __ JumpIfSmi(object, &not_js_object);
1437  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
1438 
1439  // If there is a call site cache don't look in the global cache, but do the
1440  // real lookup and update the call site cache.
1442  Label miss;
1443  __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
1444  __ Branch(&miss, ne, function, Operand(at));
1445  __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
1446  __ Branch(&miss, ne, map, Operand(at));
1447  __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
1448  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1449 
1450  __ bind(&miss);
1451  }
1452 
1453  // Get the prototype of the function.
1454  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
1455 
1456  // Check that the function prototype is a JS object.
1457  __ JumpIfSmi(prototype, &slow);
1458  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
1459 
1460  // Update the global instanceof or call site inlined cache with the current
1461  // map and function. The cached answer will be set when it is known below.
1462  if (!HasCallSiteInlineCheck()) {
1463  __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1464  __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
1465  } else {
1466  DCHECK(HasArgsInRegisters());
1467  // Patch the (relocated) inlined map check.
1468 
1469  // The offset was stored in t0 safepoint slot.
1470  // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
1471  __ LoadFromSafepointRegisterSlot(scratch, t0);
1472  __ Subu(inline_site, ra, scratch);
1473  // Get the map location in scratch and patch it.
1474  __ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch.
1475  __ sw(map, FieldMemOperand(scratch, Cell::kValueOffset));
1476  }
1477 
1478  // Register mapping: a3 is object map and t0 is function prototype.
1479  // Get prototype of object into a2.
1480  __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
1481 
1482  // We don't need map any more. Use it as a scratch register.
1483  Register scratch2 = map;
1484  map = no_reg;
1485 
1486  // Loop through the prototype chain looking for the function prototype.
1487  __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
1488  __ bind(&loop);
1489  __ Branch(&is_instance, eq, scratch, Operand(prototype));
1490  __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
1491  __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
1492  __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
1493  __ Branch(&loop);
1494 
1495  __ bind(&is_instance);
1496  DCHECK(Smi::FromInt(0) == 0);
1497  if (!HasCallSiteInlineCheck()) {
1498  __ mov(v0, zero_reg);
1499  __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
1500  if (ReturnTrueFalseObject()) {
1501  __ LoadRoot(v0, Heap::kTrueValueRootIndex);
1502  }
1503  } else {
1504  // Patch the call site to return true.
1505  __ LoadRoot(v0, Heap::kTrueValueRootIndex);
1506  __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
1507  // Get the boolean result location in scratch and patch it.
1508  __ PatchRelocatedValue(inline_site, scratch, v0);
1509 
1510  if (!ReturnTrueFalseObject()) {
1511  DCHECK_EQ(Smi::FromInt(0), 0);
1512  __ mov(v0, zero_reg);
1513  }
1514  }
1515  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1516 
1517  __ bind(&is_not_instance);
1518  if (!HasCallSiteInlineCheck()) {
1519  __ li(v0, Operand(Smi::FromInt(1)));
1520  __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
1521  if (ReturnTrueFalseObject()) {
1522  __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1523  }
1524  } else {
1525  // Patch the call site to return false.
1526  __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1527  __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
1528  // Get the boolean result location in scratch and patch it.
1529  __ PatchRelocatedValue(inline_site, scratch, v0);
1530 
1531  if (!ReturnTrueFalseObject()) {
1532  __ li(v0, Operand(Smi::FromInt(1)));
1533  }
1534  }
1535 
1536  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1537 
1538  Label object_not_null, object_not_null_or_smi;
1539  __ bind(&not_js_object);
1540  // Before null, smi and string value checks, check that the rhs is a function
1541  // as for a non-function rhs an exception needs to be thrown.
1542  __ JumpIfSmi(function, &slow);
1543  __ GetObjectType(function, scratch2, scratch);
1544  __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
1545 
1546  // Null is not instance of anything.
1547  __ Branch(&object_not_null,
1548  ne,
1549  scratch,
1550  Operand(isolate()->factory()->null_value()));
1551  if (ReturnTrueFalseObject()) {
1552  __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1553  } else {
1554  __ li(v0, Operand(Smi::FromInt(1)));
1555  }
1556  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1557 
1558  __ bind(&object_not_null);
1559  // Smi values are not instances of anything.
1560  __ JumpIfNotSmi(object, &object_not_null_or_smi);
1561  if (ReturnTrueFalseObject()) {
1562  __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1563  } else {
1564  __ li(v0, Operand(Smi::FromInt(1)));
1565  }
1566  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1567 
1568  __ bind(&object_not_null_or_smi);
1569  // String values are not instances of anything.
1570  __ IsObjectJSStringType(object, scratch, &slow);
1571  if (ReturnTrueFalseObject()) {
1572  __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1573  } else {
1574  __ li(v0, Operand(Smi::FromInt(1)));
1575  }
1576  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1577 
1578  // Slow-case. Tail call builtin.
1579  __ bind(&slow);
1580  if (!ReturnTrueFalseObject()) {
1581  if (HasArgsInRegisters()) {
1582  __ Push(a0, a1);
1583  }
1584  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
1585  } else {
1586  {
1587  FrameScope scope(masm, StackFrame::INTERNAL);
1588  __ Push(a0, a1);
1589  __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
1590  }
1591  __ mov(a0, v0);
1592  __ LoadRoot(v0, Heap::kTrueValueRootIndex);
1593  __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
1594  __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1595  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1596  }
1597 }
1598 
1599 
1600 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
1601  Label miss;
1602  Register receiver = LoadDescriptor::ReceiverRegister();
1603  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, a2,
1604  t0, &miss);
1605  __ bind(&miss);
1606  PropertyAccessCompiler::TailCallBuiltin(
1607  masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
1608 }
1609 
1610 
1611 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
1612  // The displacement is the offset of the last parameter (if any)
1613  // relative to the frame pointer.
1614  const int kDisplacement =
1615  StandardFrameConstants::kCallerSPOffset - kPointerSize;
1616  DCHECK(a1.is(ArgumentsAccessReadDescriptor::index()));
1617  DCHECK(a0.is(ArgumentsAccessReadDescriptor::parameter_count()));
1618 
1619  // Check that the key is a smi.
1620  Label slow;
1621  __ JumpIfNotSmi(a1, &slow);
1622 
1623  // Check if the calling frame is an arguments adaptor frame.
1624  Label adaptor;
1627  __ Branch(&adaptor,
1628  eq,
1629  a3,
1631 
1632  // Check index (a1) against formal parameters count limit passed in
1633  // through register a0. Use unsigned comparison to get negative
1634  // check for free.
1635  __ Branch(&slow, hs, a1, Operand(a0));
1636 
1637  // Read the argument from the stack and return it.
1638  __ subu(a3, a0, a1);
1639  __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
1640  __ Addu(a3, fp, Operand(t3));
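  // Note: a0 and a1 are both smis, so (a0 - a1) already carries a factor of
  // two; shifting by kPointerSizeLog2 - kSmiTagSize (one bit) scales the
  // difference to a byte offset from fp. The load in the branch delay slot
  // below then picks up the requested caller-pushed argument.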
1641  __ Ret(USE_DELAY_SLOT);
1642  __ lw(v0, MemOperand(a3, kDisplacement));
1643 
1644  // Arguments adaptor case: Check index (a1) against actual arguments
1645  // limit found in the arguments adaptor frame. Use unsigned
1646  // comparison to get negative check for free.
1647  __ bind(&adaptor);
1649  __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
1650 
1651  // Read the argument from the adaptor frame and return it.
1652  __ subu(a3, a0, a1);
1653  __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
1654  __ Addu(a3, a2, Operand(t3));
1655  __ Ret(USE_DELAY_SLOT);
1656  __ lw(v0, MemOperand(a3, kDisplacement));
1657 
1658  // Slow-case: Handle non-smi or out-of-bounds access to arguments
1659  // by calling the runtime system.
1660  __ bind(&slow);
1661  __ push(a1);
1662  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
1663 }
1664 
1665 
1666 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
1667  // sp[0] : number of parameters
1668  // sp[4] : receiver displacement
1669  // sp[8] : function
1670  // Check if the calling frame is an arguments adaptor frame.
1671  Label runtime;
1674  __ Branch(&runtime,
1675  ne,
1676  a2,
1678 
1679  // Patch the arguments.length and the parameters pointer in the current frame.
1681  __ sw(a2, MemOperand(sp, 0 * kPointerSize));
1682  __ sll(t3, a2, 1);
1683  __ Addu(a3, a3, Operand(t3));
1685  __ sw(a3, MemOperand(sp, 1 * kPointerSize));
1686 
1687  __ bind(&runtime);
1688  __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
1689 }
1690 
1691 
1692 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
1693  // Stack layout:
1694  // sp[0] : number of parameters (tagged)
1695  // sp[4] : address of receiver argument
1696  // sp[8] : function
1697  // Registers used over whole function:
1698  // t2 : allocated object (tagged)
1699  // t5 : mapped parameter count (tagged)
1700 
1701  __ lw(a1, MemOperand(sp, 0 * kPointerSize));
1702  // a1 = parameter count (tagged)
1703 
1704  // Check if the calling frame is an arguments adaptor frame.
1705  Label runtime;
1706  Label adaptor_frame, try_allocate;
1709  __ Branch(&adaptor_frame,
1710  eq,
1711  a2,
1713 
1714  // No adaptor, parameter count = argument count.
1715  __ mov(a2, a1);
1716  __ b(&try_allocate);
1717  __ nop(); // Branch delay slot nop.
1718 
1719  // We have an adaptor frame. Patch the parameters pointer.
1720  __ bind(&adaptor_frame);
1722  __ sll(t6, a2, 1);
1723  __ Addu(a3, a3, Operand(t6));
1724  __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
1725  __ sw(a3, MemOperand(sp, 1 * kPointerSize));
1726 
1727  // a1 = parameter count (tagged)
1728  // a2 = argument count (tagged)
1729  // Compute the mapped parameter count = min(a1, a2) in a1.
1730  Label skip_min;
1731  __ Branch(&skip_min, lt, a1, Operand(a2));
1732  __ mov(a1, a2);
1733  __ bind(&skip_min);
1734 
1735  __ bind(&try_allocate);
1736 
1737  // Compute the sizes of backing store, parameter map, and arguments object.
1738  // 1. Parameter map, has 2 extra words containing context and backing store.
1739  const int kParameterMapHeaderSize =
1741  // If there are no mapped parameters, we do not need the parameter_map.
1742  Label param_map_size;
1743  DCHECK_EQ(0, Smi::FromInt(0));
1744  __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
1745  __ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
1746  __ sll(t5, a1, 1);
1747  __ addiu(t5, t5, kParameterMapHeaderSize);
1748  __ bind(&param_map_size);
1749 
1750  // 2. Backing store.
1751  __ sll(t6, a2, 1);
1752  __ Addu(t5, t5, Operand(t6));
1753  __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
1754 
1755  // 3. Arguments object.
1756  __ Addu(t5, t5, Operand(Heap::kSloppyArgumentsObjectSize));
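  // Note: t5 now holds the total allocation size in bytes:
  //   (mapped count == 0 ? 0
  //                      : mapped count * kPointerSize + kParameterMapHeaderSize)
  //   + argument count * kPointerSize + FixedArray::kHeaderSize
  //   + Heap::kSloppyArgumentsObjectSize.
  // The smi-tagged counts only needed a 1-bit shift to become byte offsets.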
1757 
1758  // Do the allocation of all three objects in one go.
1759  __ Allocate(t5, v0, a3, t0, &runtime, TAG_OBJECT);
1760 
1761  // v0 = address of new object(s) (tagged)
1762  // a2 = argument count (smi-tagged)
1763  // Get the arguments boilerplate from the current native context into t0.
1764  const int kNormalOffset =
1766  const int kAliasedOffset =
1768 
1771  Label skip2_ne, skip2_eq;
1772  __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
1773  __ lw(t0, MemOperand(t0, kNormalOffset));
1774  __ bind(&skip2_ne);
1775 
1776  __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
1777  __ lw(t0, MemOperand(t0, kAliasedOffset));
1778  __ bind(&skip2_eq);
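  // Note: t0 now holds the arguments map to use: the normal sloppy arguments
  // map when no parameters are mapped, the aliased arguments map otherwise.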
1779 
1780  // v0 = address of new object (tagged)
1781  // a1 = mapped parameter count (tagged)
1782  // a2 = argument count (smi-tagged)
1783  // t0 = address of arguments map (tagged)
1785  __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
1788 
1789  // Set up the callee in-object property.
1791  __ lw(a3, MemOperand(sp, 2 * kPointerSize));
1792  __ AssertNotSmi(a3);
1793  const int kCalleeOffset = JSObject::kHeaderSize +
1795  __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
1796 
1797  // Use the length (smi tagged) and set that as an in-object property too.
1798  __ AssertSmi(a2);
1800  const int kLengthOffset = JSObject::kHeaderSize +
1802  __ sw(a2, FieldMemOperand(v0, kLengthOffset));
1803 
1804  // Set up the elements pointer in the allocated arguments object.
1805  // If we allocated a parameter map, t0 will point there, otherwise
1806  // it will point to the backing store.
1807  __ Addu(t0, v0, Operand(Heap::kSloppyArgumentsObjectSize));
1809 
1810  // v0 = address of new object (tagged)
1811  // a1 = mapped parameter count (tagged)
1812  // a2 = argument count (tagged)
1813  // t0 = address of parameter map or backing store (tagged)
1814  // Initialize parameter map. If there are no mapped arguments, we're done.
1815  Label skip_parameter_map;
1816  Label skip3;
1817  __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
1818  // Move backing store address to a3, because it is
1819  // expected there when filling in the unmapped arguments.
1820  __ mov(a3, t0);
1821  __ bind(&skip3);
1822 
1823  __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
1824 
1825  __ LoadRoot(t2, Heap::kSloppyArgumentsElementsMapRootIndex);
1827  __ Addu(t2, a1, Operand(Smi::FromInt(2)));
1830  __ sll(t6, a1, 1);
1831  __ Addu(t2, t0, Operand(t6));
1832  __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
1834 
1835  // Copy the parameter slots and the holes in the arguments.
1836  // We need to fill in mapped_parameter_count slots. They index the context,
1837  // where parameters are stored in reverse order, at
1838  // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
1839  // The mapped parameters thus need to get indices
1840  // MIN_CONTEXT_SLOTS+parameter_count-1 ..
1841  // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
1842  // We loop from right to left.
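  // For example, with parameter_count == 3 and mapped_parameter_count == 2,
  // the two mapped slots receive the context indices MIN_CONTEXT_SLOTS + 1 and
  // MIN_CONTEXT_SLOTS + 2, and the matching backing store slots are filled
  // with the hole.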
1843  Label parameters_loop, parameters_test;
1844  __ mov(t2, a1);
1845  __ lw(t5, MemOperand(sp, 0 * kPointerSize));
1846  __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
1847  __ Subu(t5, t5, Operand(a1));
1848  __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
1849  __ sll(t6, t2, 1);
1850  __ Addu(a3, t0, Operand(t6));
1851  __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
1852 
1853  // t2 = loop variable (tagged)
1854  // a1 = mapping index (tagged)
1855  // a3 = address of backing store (tagged)
1856  // t0 = address of parameter map (tagged)
1857  // t1 = temporary scratch (a.o., for address calculation)
1858  // t3 = the hole value
1859  __ jmp(&parameters_test);
1860 
1861  __ bind(&parameters_loop);
1862  __ Subu(t2, t2, Operand(Smi::FromInt(1)));
1863  __ sll(t1, t2, 1);
1864  __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
1865  __ Addu(t6, t0, t1);
1866  __ sw(t5, MemOperand(t6));
1867  __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
1868  __ Addu(t6, a3, t1);
1869  __ sw(t3, MemOperand(t6));
1870  __ Addu(t5, t5, Operand(Smi::FromInt(1)));
1871  __ bind(&parameters_test);
1872  __ Branch(&parameters_loop, ne, t2, Operand(Smi::FromInt(0)));
1873 
1874  __ bind(&skip_parameter_map);
1875  // a2 = argument count (tagged)
1876  // a3 = address of backing store (tagged)
1877  // t1 = scratch
1878  // Copy arguments header and remaining slots (if there are any).
1879  __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
1882 
1883  Label arguments_loop, arguments_test;
1884  __ mov(t5, a1);
1885  __ lw(t0, MemOperand(sp, 1 * kPointerSize));
1886  __ sll(t6, t5, 1);
1887  __ Subu(t0, t0, Operand(t6));
1888  __ jmp(&arguments_test);
1889 
1890  __ bind(&arguments_loop);
1891  __ Subu(t0, t0, Operand(kPointerSize));
1892  __ lw(t2, MemOperand(t0, 0));
1893  __ sll(t6, t5, 1);
1894  __ Addu(t1, a3, Operand(t6));
1896  __ Addu(t5, t5, Operand(Smi::FromInt(1)));
1897 
1898  __ bind(&arguments_test);
1899  __ Branch(&arguments_loop, lt, t5, Operand(a2));
1900 
1901  // Return and remove the on-stack parameters.
1902  __ DropAndRet(3);
1903 
1904  // Do the runtime call to allocate the arguments object.
1905  // a2 = argument count (tagged)
1906  __ bind(&runtime);
1907  __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
1908  __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
1909 }
1910 
1911 
1912 void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
1913  // Return address is in ra.
1914  Label slow;
1915 
1916  Register receiver = LoadDescriptor::ReceiverRegister();
1917  Register key = LoadDescriptor::NameRegister();
1918 
1919  // Check that the key is an array index, that is, a Uint32.
1920  __ And(t0, key, Operand(kSmiTagMask | kSmiSignMask));
1921  __ Branch(&slow, ne, t0, Operand(zero_reg));
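  // Note: a key passes this test only if it is a smi with the sign bit clear,
  // i.e. a non-negative integer that can serve as an array index.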
1922 
1923  // Everything is fine, call runtime.
1924  __ Push(receiver, key); // Receiver, key.
1925 
1926  // Perform tail call to the entry.
1927  __ TailCallExternalReference(
1928  ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
1929  masm->isolate()),
1930  2, 1);
1931 
1932  __ bind(&slow);
1933  PropertyAccessCompiler::TailCallBuiltin(
1934  masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
1935 }
1936 
1937 
1938 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
1939  // sp[0] : number of parameters
1940  // sp[4] : receiver displacement
1941  // sp[8] : function
1942  // Check if the calling frame is an arguments adaptor frame.
1943  Label adaptor_frame, try_allocate, runtime;
1946  __ Branch(&adaptor_frame,
1947  eq,
1948  a3,
1950 
1951  // Get the length from the frame.
1952  __ lw(a1, MemOperand(sp, 0));
1953  __ Branch(&try_allocate);
1954 
1955  // Patch the arguments.length and the parameters pointer.
1956  __ bind(&adaptor_frame);
1958  __ sw(a1, MemOperand(sp, 0));
1959  __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
1960  __ Addu(a3, a2, Operand(at));
1961 
1962  __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
1963  __ sw(a3, MemOperand(sp, 1 * kPointerSize));
1964 
1965  // Try the new space allocation. Start out with computing the size
1966  // of the arguments object and the elements array in words.
1967  Label add_arguments_object;
1968  __ bind(&try_allocate);
1969  __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
1970  __ srl(a1, a1, kSmiTagSize);
1971 
1972  __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
1973  __ bind(&add_arguments_object);
1974  __ Addu(a1, a1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
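  // Note: a1 is now the allocation size in words: the strict arguments object
  // itself, plus (argument count + FixedArray header words) when there are
  // arguments; for zero arguments the elements array is omitted entirely.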
1975 
1976  // Do the allocation of both objects in one go.
1977  __ Allocate(a1, v0, a2, a3, &runtime,
1978  static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
1979 
1980  // Get the arguments boilerplate from the current native context.
1983  __ lw(t0, MemOperand(
1985 
1987  __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
1990 
1991  // Get the length (smi tagged) and set that as an in-object property too.
1993  __ lw(a1, MemOperand(sp, 0 * kPointerSize));
1994  __ AssertSmi(a1);
1997 
1998  Label done;
1999  __ Branch(&done, eq, a1, Operand(zero_reg));
2000 
2001  // Get the parameters pointer from the stack.
2002  __ lw(a2, MemOperand(sp, 1 * kPointerSize));
2003 
2004  // Set up the elements pointer in the allocated arguments object and
2005  // initialize the header in the elements fixed array.
2006  __ Addu(t0, v0, Operand(Heap::kStrictArgumentsObjectSize));
2008  __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
2011  // Untag the length for the loop.
2012  __ srl(a1, a1, kSmiTagSize);
2013 
2014  // Copy the fixed array slots.
2015  Label loop;
2016  // Set up t0 to point to the first array slot.
2017  __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2018  __ bind(&loop);
2019  // Pre-decrement a2 with kPointerSize on each iteration.
2020  // Pre-decrement in order to skip receiver.
2021  __ Addu(a2, a2, Operand(-kPointerSize));
2022  __ lw(a3, MemOperand(a2));
2023  // Post-increment t0 with kPointerSize on each iteration.
2024  __ sw(a3, MemOperand(t0));
2025  __ Addu(t0, t0, Operand(kPointerSize));
2026  __ Subu(a1, a1, Operand(1));
2027  __ Branch(&loop, ne, a1, Operand(zero_reg));
2028 
2029  // Return and remove the on-stack parameters.
2030  __ bind(&done);
2031  __ DropAndRet(3);
2032 
2033  // Do the runtime call to allocate the arguments object.
2034  __ bind(&runtime);
2035  __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
2036 }
2037 
2038 
2039 void RegExpExecStub::Generate(MacroAssembler* masm) {
2040  // Just jump directly to the runtime if native RegExp is not selected at
2041  // compile time, or if entering generated regexp code has been turned off by
2042  // a runtime flag.
2043 #ifdef V8_INTERPRETED_REGEXP
2044  __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
2045 #else // V8_INTERPRETED_REGEXP
2046 
2047  // Stack frame on entry.
2048  // sp[0]: last_match_info (expected JSArray)
2049  // sp[4]: previous index
2050  // sp[8]: subject string
2051  // sp[12]: JSRegExp object
2052 
2053  const int kLastMatchInfoOffset = 0 * kPointerSize;
2054  const int kPreviousIndexOffset = 1 * kPointerSize;
2055  const int kSubjectOffset = 2 * kPointerSize;
2056  const int kJSRegExpOffset = 3 * kPointerSize;
2057 
2058  Label runtime;
2059  // Allocation of registers for this function. These are in callee save
2060  // registers and will be preserved by the call to the native RegExp code, as
2061  // this code is called using the normal C calling convention. When calling
2062  // directly from generated code the native RegExp code will not do a GC and
2063  // therefore the contents of these registers are safe to use after the call.
2064  // MIPS - using s0..s2, since we are not using CEntry Stub.
2065  Register subject = s0;
2066  Register regexp_data = s1;
2067  Register last_match_info_elements = s2;
2068 
2069  // Ensure that a RegExp stack is allocated.
2070  ExternalReference address_of_regexp_stack_memory_address =
2071  ExternalReference::address_of_regexp_stack_memory_address(
2072  isolate());
2073  ExternalReference address_of_regexp_stack_memory_size =
2074  ExternalReference::address_of_regexp_stack_memory_size(isolate());
2075  __ li(a0, Operand(address_of_regexp_stack_memory_size));
2076  __ lw(a0, MemOperand(a0, 0));
2077  __ Branch(&runtime, eq, a0, Operand(zero_reg));
2078 
2079  // Check that the first argument is a JSRegExp object.
2080  __ lw(a0, MemOperand(sp, kJSRegExpOffset));
2081  STATIC_ASSERT(kSmiTag == 0);
2082  __ JumpIfSmi(a0, &runtime);
2083  __ GetObjectType(a0, a1, a1);
2084  __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
2085 
2086  // Check that the RegExp has been compiled (data contains a fixed array).
2087  __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
2088  if (FLAG_debug_code) {
2089  __ SmiTst(regexp_data, t0);
2090  __ Check(nz,
2091  kUnexpectedTypeForRegExpDataFixedArrayExpected,
2092  t0,
2093  Operand(zero_reg));
2094  __ GetObjectType(regexp_data, a0, a0);
2095  __ Check(eq,
2096  kUnexpectedTypeForRegExpDataFixedArrayExpected,
2097  a0,
2098  Operand(FIXED_ARRAY_TYPE));
2099  }
2100 
2101  // regexp_data: RegExp data (FixedArray)
2102  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2103  __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
2104  __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
2105 
2106  // regexp_data: RegExp data (FixedArray)
2107  // Check that the number of captures fit in the static offsets vector buffer.
2108  __ lw(a2,
2110  // Check (number_of_captures + 1) * 2 <= offsets vector size
2111  // Or number_of_captures * 2 <= offsets vector size - 2
2112  // Multiplying by 2 comes for free since a2 is smi-tagged.
2113  STATIC_ASSERT(kSmiTag == 0);
2116  __ Branch(
2117  &runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
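  // Note: a2 still holds the smi-tagged capture count, i.e.
  // number_of_captures * 2, so comparing it against (vector size - 2)
  // implements the check above without untagging.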
2118 
2119  // Reset offset for possibly sliced string.
2120  __ mov(t0, zero_reg);
2121  __ lw(subject, MemOperand(sp, kSubjectOffset));
2122  __ JumpIfSmi(subject, &runtime);
2123  __ mov(a3, subject); // Make a copy of the original subject string.
2124  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2126  // subject: subject string
2127  // a3: subject string
2128  // a0: subject string instance type
2129  // regexp_data: RegExp data (FixedArray)
2130  // Handle subject string according to its encoding and representation:
2131  // (1) Sequential string? If yes, go to (5).
2132  // (2) Anything but sequential or cons? If yes, go to (6).
2133  // (3) Cons string. If the string is flat, replace subject with first string.
2134  // Otherwise bailout.
2135  // (4) Is subject external? If yes, go to (7).
2136  // (5) Sequential string. Load regexp code according to encoding.
2137  // (E) Carry on.
2138  /// [...]
2139 
2140  // Deferred code at the end of the stub:
2141  // (6) Not a long external string? If yes, go to (8).
2142  // (7) External string. Make it, offset-wise, look like a sequential string.
2143  // Go to (5).
2144  // (8) Short external string or not a string? If yes, bail out to runtime.
2145  // (9) Sliced string. Replace subject with parent. Go to (4).
2146 
2147  Label seq_string /* 5 */, external_string /* 7 */,
2148  check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
2149  not_long_external /* 8 */;
2150 
2151  // (1) Sequential string? If yes, go to (5).
2152  __ And(a1,
2153  a0,
2154  Operand(kIsNotStringMask |
2158  __ Branch(&seq_string, eq, a1, Operand(zero_reg)); // Go to (5).
2159 
2160  // (2) Anything but sequential or cons? If yes, go to (6).
2165  // Go to (6).
2166  __ Branch(&not_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
2167 
2168  // (3) Cons string. Check that it's flat.
2169  // Replace subject with first string and reload instance type.
2170  __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
2171  __ LoadRoot(a1, Heap::kempty_stringRootIndex);
2172  __ Branch(&runtime, ne, a0, Operand(a1));
2173  __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
2174 
2175  // (4) Is subject external? If yes, go to (7).
2176  __ bind(&check_underlying);
2177  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2180  __ And(at, a0, Operand(kStringRepresentationMask));
2181  // The underlying external string is never a short external string.
2184  __ Branch(&external_string, ne, at, Operand(zero_reg)); // Go to (7).
2185 
2186  // (5) Sequential string. Load regexp code according to encoding.
2187  __ bind(&seq_string);
2188  // subject: sequential subject string (or look-alike, external string)
2189  // a3: original subject string
2190  // Load previous index and check range before a3 is overwritten. We have to
2191  // use a3 instead of subject here because subject might have been only made
2192  // to look like a sequential string when it actually is an external string.
2193  __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
2194  __ JumpIfNotSmi(a1, &runtime);
2196  __ Branch(&runtime, ls, a3, Operand(a1));
2197  __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
2198 
2202  __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for one-byte.
2203  __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
2204  __ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
2205  __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
2206  __ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
2207 
2208  // (E) Carry on. String handling is done.
2209  // t9: irregexp code
2210  // Check that the irregexp code has been generated for the actual string
2211  // encoding. If it has, the field contains a code object; otherwise it
2212  // contains a smi (code flushing support).
2213  __ JumpIfSmi(t9, &runtime);
2214 
2215  // a1: previous index
2216  // a3: encoding of subject string (1 if one_byte, 0 if two_byte);
2217  // t9: code
2218  // subject: Subject string
2219  // regexp_data: RegExp data (FixedArray)
2220  // All checks done. Now push arguments for native regexp code.
2221  __ IncrementCounter(isolate()->counters()->regexp_entry_native(),
2222  1, a0, a2);
2223 
2224  // Isolates: note we add an additional parameter here (isolate pointer).
2225  const int kRegExpExecuteArguments = 9;
2226  const int kParameterRegisters = 4;
2227  __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
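  // Note: under the MIPS O32 calling convention the first four arguments
  // travel in a0..a3; the remaining five (arguments 5..9) go into the stack
  // slots reserved by EnterExitFrame above.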
2228 
2229  // Stack pointer now points to cell where return address is to be written.
2230  // Arguments are before that on the stack or in registers, meaning we
2231  // treat the return address as argument 5. Thus every argument after that
2232  // needs to be shifted back by 1. Since DirectCEntryStub will handle
2233  // allocating space for the c argument slots, we don't need to calculate
2234  // that into the argument positions on the stack. This is how the stack will
2235  // look (sp meaning the value of sp at this moment):
2236  // [sp + 5] - Argument 9
2237  // [sp + 4] - Argument 8
2238  // [sp + 3] - Argument 7
2239  // [sp + 2] - Argument 6
2240  // [sp + 1] - Argument 5
2241  // [sp + 0] - saved ra
2242 
2243  // Argument 9: Pass current isolate address.
2244  // CFunctionArgumentOperand handles MIPS stack argument slots.
2245  __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
2246  __ sw(a0, MemOperand(sp, 5 * kPointerSize));
2247 
2248  // Argument 8: Indicate that this is a direct call from JavaScript.
2249  __ li(a0, Operand(1));
2250  __ sw(a0, MemOperand(sp, 4 * kPointerSize));
2251 
2252  // Argument 7: Start (high end) of backtracking stack memory area.
2253  __ li(a0, Operand(address_of_regexp_stack_memory_address));
2254  __ lw(a0, MemOperand(a0, 0));
2255  __ li(a2, Operand(address_of_regexp_stack_memory_size));
2256  __ lw(a2, MemOperand(a2, 0));
2257  __ addu(a0, a0, a2);
2258  __ sw(a0, MemOperand(sp, 3 * kPointerSize));
2259 
2260  // Argument 6: Set the number of capture registers to zero to force global
2261  // regexps to behave as non-global. This does not affect non-global regexps.
2262  __ mov(a0, zero_reg);
2263  __ sw(a0, MemOperand(sp, 2 * kPointerSize));
2264 
2265  // Argument 5: static offsets vector buffer.
2266  __ li(a0, Operand(
2267  ExternalReference::address_of_static_offsets_vector(isolate())));
2268  __ sw(a0, MemOperand(sp, 1 * kPointerSize));
2269 
2270  // For arguments 4 and 3, get the string length, calculate the start of the
2271  // string data, and calculate the shift of the index (0 for one-byte, 1 for two-byte).
2272  __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
2273  __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
2274  // Load the length from the original subject string from the previous stack
2275  // frame. Therefore we have to use fp, which points exactly to two pointer
2276  // sizes below the previous sp. (Because creating a new stack frame pushes
2277  // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
2278  __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
2279  // If slice offset is not 0, load the length from the original sliced string.
2280  // Argument 4, a3: End of string data
2281  // Argument 3, a2: Start of string data
2282  // Prepare start and end index of the input.
2283  __ sllv(t1, t0, a3);
2284  __ addu(t0, t2, t1);
2285  __ sllv(t1, a1, a3);
2286  __ addu(a2, t0, t1);
2287 
2288  __ lw(t2, FieldMemOperand(subject, String::kLengthOffset));
2289  __ sra(t2, t2, kSmiTagSize);
2290  __ sllv(t1, t2, a3);
2291  __ addu(a3, t0, t1);
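  // Note: at this point, with shift = 0 for one-byte and 1 for two-byte
  // strings:
  //   a2 (argument 3) = string data + ((slice offset + previous index) << shift)
  //   a3 (argument 4) = string data + ((slice offset + subject length) << shift)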
2292  // Argument 2 (a1): Previous index.
2293  // Already there
2294 
2295  // Argument 1 (a0): Subject string.
2296  __ mov(a0, subject);
2297 
2298  // Locate the code entry and call it.
2299  __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
2300  DirectCEntryStub stub(isolate());
2301  stub.GenerateCall(masm, t9);
2302 
2303  __ LeaveExitFrame(false, no_reg, true);
2304 
2305  // v0: result
2306  // subject: subject string (callee saved)
2307  // regexp_data: RegExp data (callee saved)
2308  // last_match_info_elements: Last match info elements (callee saved)
2309  // Check the result.
2310  Label success;
2311  __ Branch(&success, eq, v0, Operand(1));
2312  // We expect exactly one result since we force the called regexp to behave
2313  // as non-global.
2314  Label failure;
2315  __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
2316  // If it is not an exception, it can only be a retry. Handle that in the runtime system.
2317  __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
2318  // The result must now be an exception. If there is no pending exception
2319  // already, a stack overflow was detected on the backtrack stack in the RegExp
2320  // code but the exception has not been created yet. Handle that in the runtime system.
2321  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
2322  __ li(a1, Operand(isolate()->factory()->the_hole_value()));
2323  __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
2324  isolate())));
2325  __ lw(v0, MemOperand(a2, 0));
2326  __ Branch(&runtime, eq, v0, Operand(a1));
2327 
2328  __ sw(a1, MemOperand(a2, 0)); // Clear pending exception.
2329 
2330  // Check if the exception is a termination. If so, throw as uncatchable.
2331  __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
2332  Label termination_exception;
2333  __ Branch(&termination_exception, eq, v0, Operand(a0));
2334 
2335  __ Throw(v0);
2336 
2337  __ bind(&termination_exception);
2338  __ ThrowUncatchable(v0);
2339 
2340  __ bind(&failure);
2341  // For failure and exception return null.
2342  __ li(v0, Operand(isolate()->factory()->null_value()));
2343  __ DropAndRet(4);
2344 
2345  // Process the result from the native regexp code.
2346  __ bind(&success);
2347  __ lw(a1,
2349  // Calculate number of capture registers (number_of_captures + 1) * 2.
2350  // Multiplying by 2 comes for free since a1 is smi-tagged.
2351  STATIC_ASSERT(kSmiTag == 0);
2353  __ Addu(a1, a1, Operand(2)); // a1 was a smi.
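  // Note: a1 held the capture count as a smi (already doubled), so adding 2
  // yields (number_of_captures + 1) * 2; e.g. two capture groups give
  // a1 = 2 * 2 + 2 = 6 capture registers.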
2354 
2355  __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
2356  __ JumpIfSmi(a0, &runtime);
2357  __ GetObjectType(a0, a2, a2);
2358  __ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE));
2359  // Check that the JSArray is in fast case.
2360  __ lw(last_match_info_elements,
2362  __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
2363  __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
2364  __ Branch(&runtime, ne, a0, Operand(at));
2365  // Check that the last match info has space for the capture registers and the
2366  // additional information.
2367  __ lw(a0,
2368  FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
2369  __ Addu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
2370  __ sra(at, a0, kSmiTagSize);
2371  __ Branch(&runtime, gt, a2, Operand(at));
2372 
2373  // a1: number of capture registers
2374  // subject: subject string
2375  // Store the capture count.
2376  __ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi.
2377  __ sw(a2, FieldMemOperand(last_match_info_elements,
2379  // Store last subject and last input.
2380  __ sw(subject,
2381  FieldMemOperand(last_match_info_elements,
2383  __ mov(a2, subject);
2384  __ RecordWriteField(last_match_info_elements,
2386  subject,
2387  t3,
2389  kDontSaveFPRegs);
2390  __ mov(subject, a2);
2391  __ sw(subject,
2392  FieldMemOperand(last_match_info_elements,
2394  __ RecordWriteField(last_match_info_elements,
2396  subject,
2397  t3,
2399  kDontSaveFPRegs);
2400 
2401  // Get the static offsets vector filled by the native regexp code.
2402  ExternalReference address_of_static_offsets_vector =
2403  ExternalReference::address_of_static_offsets_vector(isolate());
2404  __ li(a2, Operand(address_of_static_offsets_vector));
2405 
2406  // a1: number of capture registers
2407  // a2: offsets vector
2408  Label next_capture, done;
2409  // The capture register counter starts from the number of capture registers
2410  // and counts down until it drops below zero.
2411  __ Addu(a0,
2412  last_match_info_elements,
2414  __ bind(&next_capture);
2415  __ Subu(a1, a1, Operand(1));
2416  __ Branch(&done, lt, a1, Operand(zero_reg));
2417  // Read the value from the static offsets vector buffer.
2418  __ lw(a3, MemOperand(a2, 0));
2419  __ addiu(a2, a2, kPointerSize);
2420  // Store the smi value in the last match info.
2421  __ sll(a3, a3, kSmiTagSize); // Convert to Smi.
2422  __ sw(a3, MemOperand(a0, 0));
2423  __ Branch(&next_capture, USE_DELAY_SLOT);
2424  __ addiu(a0, a0, kPointerSize); // In branch delay slot.
2425 
2426  __ bind(&done);
2427 
2428  // Return last match info.
2429  __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
2430  __ DropAndRet(4);
2431 
2432  // Do the runtime call to execute the regexp.
2433  __ bind(&runtime);
2434  __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
2435 
2436  // Deferred code for string handling.
2437  // (6) Not a long external string? If yes, go to (8).
2438  __ bind(&not_seq_nor_cons);
2439  // Go to (8).
2440  __ Branch(&not_long_external, gt, a1, Operand(kExternalStringTag));
2441 
2442  // (7) External string. Make it, offset-wise, look like a sequential string.
2443  __ bind(&external_string);
2444  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2446  if (FLAG_debug_code) {
2447  // Assert that we do not have a cons or slice (indirect strings) here.
2448  // Sequential strings have already been ruled out.
2449  __ And(at, a0, Operand(kIsIndirectStringMask));
2450  __ Assert(eq,
2451  kExternalStringExpectedButNotFound,
2452  at,
2453  Operand(zero_reg));
2454  }
2455  __ lw(subject,
2457  // Move the pointer so that offset-wise, it looks like a sequential string.
2459  __ Subu(subject,
2460  subject,
2462  __ jmp(&seq_string); // Go to (5).
2463 
2464  // (8) Short external string or not a string? If yes, bail out to runtime.
2465  __ bind(&not_long_external);
2467  __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
2468  __ Branch(&runtime, ne, at, Operand(zero_reg));
2469 
2470  // (9) Sliced string. Replace subject with parent. Go to (4).
2471  // Load offset into t0 and replace subject string with parent.
2473  __ sra(t0, t0, kSmiTagSize);
2474  __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
2475  __ jmp(&check_underlying); // Go to (4).
2476 #endif // V8_INTERPRETED_REGEXP
2477 }
2478 
2479 
2480 static void GenerateRecordCallTarget(MacroAssembler* masm) {
2481  // Cache the called function in a feedback vector slot. Cache states
2482  // are uninitialized, monomorphic (indicated by a JSFunction), and
2483  // megamorphic.
2484  // a0 : number of arguments to the construct function
2485  // a1 : the function to call
2486  // a2 : Feedback vector
2487  // a3 : slot in feedback vector (Smi)
2488  Label initialize, done, miss, megamorphic, not_array_function;
2489 
2491  masm->isolate()->heap()->megamorphic_symbol());
2493  masm->isolate()->heap()->uninitialized_symbol());
2494 
2495  // Load the cache state into t0.
2496  __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2497  __ Addu(t0, a2, Operand(t0));
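  // Note: the slot index a3 is a smi, so a 1-bit shift
  // (kPointerSizeLog2 - kSmiTagSize) turns it into a byte offset into the
  // feedback vector.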
2499 
2500  // A monomorphic cache hit or an already megamorphic state: invoke the
2501  // function without changing the state.
2502  __ Branch(&done, eq, t0, Operand(a1));
2503 
2504  if (!FLAG_pretenuring_call_new) {
2505  // If we came here, we need to see if we are the array function.
2506  // If we didn't have a matching function, and we didn't find the megamorphic
2507  // sentinel, then the slot holds either some other function or an
2508  // AllocationSite. Do a map check on the object loaded from the slot (t0).
2509  __ lw(t1, FieldMemOperand(t0, 0));
2510  __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2511  __ Branch(&miss, ne, t1, Operand(at));
2512 
2513  // Make sure the function is the Array() function
2514  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
2515  __ Branch(&megamorphic, ne, a1, Operand(t0));
2516  __ jmp(&done);
2517  }
2518 
2519  __ bind(&miss);
2520 
2521  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
2522  // megamorphic.
2523  __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex);
2524  __ Branch(&initialize, eq, t0, Operand(at));
2525  // MegamorphicSentinel is an immortal immovable object (undefined) so no
2526  // write-barrier is needed.
2527  __ bind(&megamorphic);
2528  __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2529  __ Addu(t0, a2, Operand(t0));
2530  __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
2532  __ jmp(&done);
2533 
2534  // An uninitialized cache is patched with the function.
2535  __ bind(&initialize);
2536  if (!FLAG_pretenuring_call_new) {
2537  // Make sure the function is the Array() function.
2538  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
2539  __ Branch(&not_array_function, ne, a1, Operand(t0));
2540 
2541  // The target function is the Array constructor.
2542  // Create an AllocationSite if we don't already have it, store it in the
2543  // slot.
2544  {
2545  FrameScope scope(masm, StackFrame::INTERNAL);
2546  const RegList kSavedRegs =
2547  1 << 4 | // a0
2548  1 << 5 | // a1
2549  1 << 6 | // a2
2550  1 << 7; // a3
2551 
2552  // Arguments register must be smi-tagged to call out.
2553  __ SmiTag(a0);
2554  __ MultiPush(kSavedRegs);
2555 
2556  CreateAllocationSiteStub create_stub(masm->isolate());
2557  __ CallStub(&create_stub);
2558 
2559  __ MultiPop(kSavedRegs);
2560  __ SmiUntag(a0);
2561  }
2562  __ Branch(&done);
2563 
2564  __ bind(&not_array_function);
2565  }
2566 
2567  __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2568  __ Addu(t0, a2, Operand(t0));
2569  __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2570  __ sw(a1, MemOperand(t0, 0));
2571 
2572  __ Push(t0, a2, a1);
2573  __ RecordWrite(a2, t0, a1, kRAHasNotBeenSaved, kDontSaveFPRegs,
2575  __ Pop(t0, a2, a1);
2576 
2577  __ bind(&done);
2578 }
2579 
2580 
2581 static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
2584 
2585  // Do not transform the receiver for strict mode functions.
2586  int32_t strict_mode_function_mask =
2588  // Do not transform the receiver for native (Compilerhints already in a3).
2589  int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
2590  __ And(at, t0, Operand(strict_mode_function_mask | native_mask));
2591  __ Branch(cont, ne, at, Operand(zero_reg));
2592 }
2593 
2594 
2595 static void EmitSlowCase(MacroAssembler* masm,
2596  int argc,
2597  Label* non_function) {
2598  // Check for function proxy.
2599  __ Branch(non_function, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
2600  __ push(a1); // put proxy as additional argument
2601  __ li(a0, Operand(argc + 1, RelocInfo::NONE32));
2602  __ mov(a2, zero_reg);
2603  __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
2604  {
2605  Handle<Code> adaptor =
2606  masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
2607  __ Jump(adaptor, RelocInfo::CODE_TARGET);
2608  }
2609 
2610  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
2611  // of the original receiver from the call site).
2612  __ bind(non_function);
2613  __ sw(a1, MemOperand(sp, argc * kPointerSize));
2614  __ li(a0, Operand(argc)); // Set up the number of arguments.
2615  __ mov(a2, zero_reg);
2616  __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
2617  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2619 }
2620 
2621 
2622 static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
2623  // Wrap the receiver and patch it back onto the stack.
2624  { FrameScope frame_scope(masm, StackFrame::INTERNAL);
2625  __ Push(a1, a3);
2626  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
2627  __ pop(a1);
2628  }
2629  __ Branch(USE_DELAY_SLOT, cont);
2630  __ sw(v0, MemOperand(sp, argc * kPointerSize));
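  // Note: the store above executes in the branch delay slot, patching the
  // wrapped receiver (in v0) back into the caller's receiver slot on the stack.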
2631 }
2632 
2633 
2634 static void CallFunctionNoFeedback(MacroAssembler* masm,
2635  int argc, bool needs_checks,
2636  bool call_as_method) {
2637  // a1 : the function to call
2638  Label slow, non_function, wrap, cont;
2639 
2640  if (needs_checks) {
2641  // Check that the function is really a JavaScript function.
2642  // a1: pushed function (to be verified)
2643  __ JumpIfSmi(a1, &non_function);
2644 
2645  // Goto slow case if we do not have a function.
2646  __ GetObjectType(a1, t0, t0);
2647  __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
2648  }
2649 
2650  // Fast-case: Invoke the function now.
2651  // a1: pushed function
2652  ParameterCount actual(argc);
2653 
2654  if (call_as_method) {
2655  if (needs_checks) {
2656  EmitContinueIfStrictOrNative(masm, &cont);
2657  }
2658 
2659  // Compute the receiver in sloppy mode.
2660  __ lw(a3, MemOperand(sp, argc * kPointerSize));
2661 
2662  if (needs_checks) {
2663  __ JumpIfSmi(a3, &wrap);
2664  __ GetObjectType(a3, t0, t0);
2665  __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
2666  } else {
2667  __ jmp(&wrap);
2668  }
2669 
2670  __ bind(&cont);
2671  }
2672 
2673  __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
2674 
2675  if (needs_checks) {
2676  // Slow-case: Non-function called.
2677  __ bind(&slow);
2678  EmitSlowCase(masm, argc, &non_function);
2679  }
2680 
2681  if (call_as_method) {
2682  __ bind(&wrap);
2683  // Wrap the receiver and patch it back onto the stack.
2684  EmitWrapCase(masm, argc, &cont);
2685  }
2686 }
2687 
2688 
2689 void CallFunctionStub::Generate(MacroAssembler* masm) {
2690  CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
2691 }
2692 
2693 
2694 void CallConstructStub::Generate(MacroAssembler* masm) {
2695  // a0 : number of arguments
2696  // a1 : the function to call
2697  // a2 : feedback vector
2698  // a3 : (only if a2 is not undefined) slot in feedback vector (Smi)
2699  Label slow, non_function_call;
2700 
2701  // Check that the function is not a smi.
2702  __ JumpIfSmi(a1, &non_function_call);
2703  // Check that the function is a JSFunction.
2704  __ GetObjectType(a1, t0, t0);
2705  __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
2706 
2707  if (RecordCallTarget()) {
2708  GenerateRecordCallTarget(masm);
2709 
2710  __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
2711  __ Addu(t1, a2, at);
2712  if (FLAG_pretenuring_call_new) {
2713  // Put the AllocationSite from the feedback vector into a2.
2714  // By adding kPointerSize we encode that we know the AllocationSite
2715  // entry is at the feedback vector slot given by a3 + 1.
2717  } else {
2718  Label feedback_register_initialized;
2719  // Put the AllocationSite from the feedback vector into a2, or undefined.
2722  __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2723  __ Branch(&feedback_register_initialized, eq, t1, Operand(at));
2724  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
2725  __ bind(&feedback_register_initialized);
2726  }
2727 
2728  __ AssertUndefinedOrAllocationSite(a2, t1);
2729  }
2730 
2731  // Jump to the function-specific construct stub.
2732  Register jmp_reg = t0;
2734  __ lw(jmp_reg, FieldMemOperand(jmp_reg,
2736  __ Addu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
2737  __ Jump(at);
2738 
2739  // a0: number of arguments
2740  // a1: called object
2741  // t0: object type
2742  Label do_call;
2743  __ bind(&slow);
2744  __ Branch(&non_function_call, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
2745  __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
2746  __ jmp(&do_call);
2747 
2748  __ bind(&non_function_call);
2749  __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
2750  __ bind(&do_call);
2751  // Set expected number of arguments to zero (not changing a0).
2752  __ li(a2, Operand(0, RelocInfo::NONE32));
2753  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2755 }
2756 
2757 
2758 static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
2760  __ lw(vector, FieldMemOperand(vector,
2762  __ lw(vector, FieldMemOperand(vector,
2764 }
2765 
2766 
2767 void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
2768  // a1 - function
2769  // a3 - slot id
2770  Label miss;
2771 
2772  EmitLoadTypeFeedbackVector(masm, a2);
2773 
2774  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at);
2775  __ Branch(&miss, ne, a1, Operand(at));
2776 
2777  __ li(a0, Operand(arg_count()));
2778  __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
2779  __ Addu(at, a2, Operand(at));
2781 
2782  // Verify that t0 contains an AllocationSite
2784  __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2785  __ Branch(&miss, ne, t1, Operand(at));
2786 
2787  __ mov(a2, t0);
2788  ArrayConstructorStub stub(masm->isolate(), arg_count());
2789  __ TailCallStub(&stub);
2790 
2791  __ bind(&miss);
2792  GenerateMiss(masm);
2793 
2794  // The slow case; we need this no matter what to complete a call after a miss.
2795  CallFunctionNoFeedback(masm,
2796  arg_count(),
2797  true,
2798  CallAsMethod());
2799 
2800  // Unreachable.
2801  __ stop("Unexpected code address");
2802 }
2803 
2804 
2805 void CallICStub::Generate(MacroAssembler* masm) {
2806  // a1 - function
2807  // a3 - slot id (Smi)
2808  Label extra_checks_or_miss, slow_start;
2809  Label slow, non_function, wrap, cont;
2810  Label have_js_function;
2811  int argc = arg_count();
2812  ParameterCount actual(argc);
2813 
2814  EmitLoadTypeFeedbackVector(masm, a2);
2815 
2816  // The checks. First, does a1 match the recorded monomorphic target?
2817  __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2818  __ Addu(t0, a2, Operand(t0));
2820  __ Branch(&extra_checks_or_miss, ne, a1, Operand(t0));
2821 
2822  __ bind(&have_js_function);
2823  if (CallAsMethod()) {
2824  EmitContinueIfStrictOrNative(masm, &cont);
2825  // Compute the receiver in sloppy mode.
2826  __ lw(a3, MemOperand(sp, argc * kPointerSize));
2827 
2828  __ JumpIfSmi(a3, &wrap);
2829  __ GetObjectType(a3, t0, t0);
2830  __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
2831 
2832  __ bind(&cont);
2833  }
2834 
2835  __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
2836 
2837  __ bind(&slow);
2838  EmitSlowCase(masm, argc, &non_function);
2839 
2840  if (CallAsMethod()) {
2841  __ bind(&wrap);
2842  EmitWrapCase(masm, argc, &cont);
2843  }
2844 
2845  __ bind(&extra_checks_or_miss);
2846  Label miss;
2847 
2848  __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
2849  __ Branch(&slow_start, eq, t0, Operand(at));
2850  __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex);
2851  __ Branch(&miss, eq, t0, Operand(at));
2852 
2853  if (!FLAG_trace_ic) {
2854  // We are going megamorphic. If the feedback is a JSFunction, it is fine
2855  // to handle it here. More complex cases are dealt with in the runtime.
2856  __ AssertNotSmi(t0);
2857  __ GetObjectType(t0, t1, t1);
2858  __ Branch(&miss, ne, t1, Operand(JS_FUNCTION_TYPE));
2859  __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2860  __ Addu(t0, a2, Operand(t0));
2861  __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
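  // Note: overwriting the slot with the megamorphic sentinel needs no write
  // barrier; the sentinel is an immortal, immovable root (see the matching
  // comment in GenerateRecordCallTarget).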
2863  __ Branch(&slow_start);
2864  }
2865 
2866  // We are here because tracing is on or we are going monomorphic.
2867  __ bind(&miss);
2868  GenerateMiss(masm);
2869 
2870  // the slow case
2871  __ bind(&slow_start);
2872  // Check that the function is really a JavaScript function.
2873  // a1: pushed function (to be verified)
2874  __ JumpIfSmi(a1, &non_function);
2875 
2876  // Goto slow case if we do not have a function.
2877  __ GetObjectType(a1, t0, t0);
2878  __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
2879  __ Branch(&have_js_function);
2880 }
2881 
2882 
2883 void CallICStub::GenerateMiss(MacroAssembler* masm) {
2884  // Get the receiver of the function from the stack; 1 ~ return address.
2885  __ lw(t0, MemOperand(sp, (arg_count() + 1) * kPointerSize));
2886 
2887  {
2888  FrameScope scope(masm, StackFrame::INTERNAL);
2889 
2890  // Push the receiver and the function and feedback info.
2891  __ Push(t0, a1, a2, a3);
2892 
2893  // Call the entry.
2894  IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
2895  : IC::kCallIC_Customization_Miss;
2896 
2897  ExternalReference miss = ExternalReference(IC_Utility(id),
2898  masm->isolate());
2899  __ CallExternalReference(miss, 4);
2900 
2901  // Move result to a1 and exit the internal frame.
2902  __ mov(a1, v0);
2903  }
2904 }
2905 
2906 
2907 // StringCharCodeAtGenerator.
2908 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
2909  DCHECK(!t0.is(index_));
2910  DCHECK(!t0.is(result_));
2911  DCHECK(!t0.is(object_));
2912 
2913  // If the receiver is a smi trigger the non-string case.
2914  __ JumpIfSmi(object_, receiver_not_string_);
2915 
2916  // Fetch the instance type of the receiver into result register.
2919  // If the receiver is not a string trigger the non-string case.
2920  __ And(t0, result_, Operand(kIsNotStringMask));
2921  __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
2922 
2923  // If the index is non-smi trigger the non-smi case.
2924  __ JumpIfNotSmi(index_, &index_not_smi_);
2925 
2926  __ bind(&got_smi_index_);
2927 
2928  // Check for index out of range.
2930  __ Branch(index_out_of_range_, ls, t0, Operand(index_));
2931 
2932  __ sra(index_, index_, kSmiTagSize);
2933 
2935  object_,
2936  index_,
2937  result_,
2938  &call_runtime_);
2939 
2940  __ sll(result_, result_, kSmiTagSize);
2941  __ bind(&exit_);
2942 }
2943 
2944 
2946  MacroAssembler* masm,
2947  const RuntimeCallHelper& call_helper) {
2948  __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
2949 
2950  // Index is not a smi.
2951  __ bind(&index_not_smi_);
2952  // If index is a heap number, try converting it to an integer.
2953  __ CheckMap(index_,
2954  result_,
2955  Heap::kHeapNumberMapRootIndex,
2958  call_helper.BeforeCall(masm);
2959  // Consumed by runtime conversion function:
2960  __ Push(object_, index_);
2962  __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
2963  } else {
2965  // NumberToSmi discards numbers that are not exact integers.
2966  __ CallRuntime(Runtime::kNumberToSmi, 1);
2967  }
2968 
2969  // Save the conversion result before the pop instructions below
2970  // have a chance to overwrite it.
2971 
2972  __ Move(index_, v0);
2973  __ pop(object_);
2974  // Reload the instance type.
2977  call_helper.AfterCall(masm);
2978  // If index is still not a smi, it must be out of range.
2979  __ JumpIfNotSmi(index_, index_out_of_range_);
2980  // Otherwise, return to the fast path.
2981  __ Branch(&got_smi_index_);
2982 
2983  // Call runtime. We get here when the receiver is a string and the
2984  // index is a number, but the code of getting the actual character
2985  // is too complex (e.g., when the string needs to be flattened).
2986  __ bind(&call_runtime_);
2987  call_helper.BeforeCall(masm);
2988  __ sll(index_, index_, kSmiTagSize);
2989  __ Push(object_, index_);
2990  __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
2991 
2992  __ Move(result_, v0);
2993 
2994  call_helper.AfterCall(masm);
2995  __ jmp(&exit_);
2996 
2997  __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
2998 }
2999 
3000 
3001 // -------------------------------------------------------------------------
3002 // StringCharFromCodeGenerator
3003 
3004 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
3005  // Fast case of Heap::LookupSingleCharacterStringFromCode.
3006 
3007  DCHECK(!t0.is(result_));
3008  DCHECK(!t0.is(code_));
3009 
3010  STATIC_ASSERT(kSmiTag == 0);
3013  __ And(t0,
3014  code_,
3015  Operand(kSmiTagMask |
3017  __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
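  // Note: a single mask test rejects both non-smi codes and smi codes above
  // the one-byte range, so only cacheable single-character codes fall through.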
3018 
3019  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
3020  // At this point code register contains smi tagged one-byte char code.
3021  STATIC_ASSERT(kSmiTag == 0);
3022  __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
3023  __ Addu(result_, result_, t0);
3025  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
3026  __ Branch(&slow_case_, eq, result_, Operand(t0));
3027  __ bind(&exit_);
3028 }
3029 
3030 
3032  MacroAssembler* masm,
3033  const RuntimeCallHelper& call_helper) {
3034  __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
3035 
3036  __ bind(&slow_case_);
3037  call_helper.BeforeCall(masm);
3038  __ push(code_);
3039  __ CallRuntime(Runtime::kCharFromCode, 1);
3040  __ Move(result_, v0);
3041 
3042  call_helper.AfterCall(masm);
3043  __ Branch(&exit_);
3044 
3045  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
3046 }
3047 
3048 
3049 enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
3050 
3051 
3052 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
3053  Register dest,
3054  Register src,
3055  Register count,
3056  Register scratch,
3057  String::Encoding encoding) {
3058  if (FLAG_debug_code) {
3059  // Check that destination is word aligned.
3060  __ And(scratch, dest, Operand(kPointerAlignmentMask));
3061  __ Check(eq,
3062  kDestinationOfCopyNotAligned,
3063  scratch,
3064  Operand(zero_reg));
3065  }
3066 
3067  // Assumes word reads and writes are little endian.
3068  // Nothing to do for zero characters.
3069  Label done;
3070 
3071  if (encoding == String::TWO_BYTE_ENCODING) {
3072  __ Addu(count, count, count);
3073  }
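  // Note: count is now a byte count; two-byte strings contribute two bytes per
  // character, so the byte-wise copy loop below handles both encodings.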
3074 
3075  Register limit = count; // Read until dest equals this.
3076  __ Addu(limit, dest, Operand(count));
3077 
3078  Label loop_entry, loop;
3079  // Copy bytes from src to dest until dest hits limit.
3080  __ Branch(&loop_entry);
3081  __ bind(&loop);
3082  __ lbu(scratch, MemOperand(src));
3083  __ Addu(src, src, Operand(1));
3084  __ sb(scratch, MemOperand(dest));
3085  __ Addu(dest, dest, Operand(1));
3086  __ bind(&loop_entry);
3087  __ Branch(&loop, lt, dest, Operand(limit));
3088 
3089  __ bind(&done);
3090 }
3091 
3092 
3093 void SubStringStub::Generate(MacroAssembler* masm) {
3094  Label runtime;
3095  // Stack frame on entry.
3096  // ra: return address
3097  // sp[0]: to
3098  // sp[4]: from
3099  // sp[8]: string
3100 
3101  // This stub is called from the native-call %_SubString(...), so
3102  // nothing can be assumed about the arguments. It is tested that:
3103  // "string" is a sequential string,
3104  // both "from" and "to" are smis, and
3105  // 0 <= from <= to <= string.length.
3106  // If any of these assumptions fail, we call the runtime system.
3107 
3108  const int kToOffset = 0 * kPointerSize;
3109  const int kFromOffset = 1 * kPointerSize;
3110  const int kStringOffset = 2 * kPointerSize;
3111 
3112  __ lw(a2, MemOperand(sp, kToOffset));
3113  __ lw(a3, MemOperand(sp, kFromOffset));
3114  STATIC_ASSERT(kFromOffset == kToOffset + 4);
3115  STATIC_ASSERT(kSmiTag == 0);
3117 
3118  // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
3119  // safe in this case.
3120  __ UntagAndJumpIfNotSmi(a2, a2, &runtime);
3121  __ UntagAndJumpIfNotSmi(a3, a3, &runtime);
3122  // Both a2 and a3 are untagged integers.
3123 
3124  __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0.
3125 
3126  __ Branch(&runtime, gt, a3, Operand(a2)); // Fail if from > to.
3127  __ Subu(a2, a2, a3);
3128 
3129  // Make sure first argument is a string.
3130  __ lw(v0, MemOperand(sp, kStringOffset));
3131  __ JumpIfSmi(v0, &runtime);
3134  __ And(t0, a1, Operand(kIsNotStringMask));
3135 
3136  __ Branch(&runtime, ne, t0, Operand(zero_reg));
3137 
3138  Label single_char;
3139  __ Branch(&single_char, eq, a2, Operand(1));
3140 
3141  // Short-cut for the case of trivial substring.
3142  Label return_v0;
3143  // v0: original string
3144  // a2: result string length
3146  __ sra(t0, t0, 1);
3147  // Return original string.
3148  __ Branch(&return_v0, eq, a2, Operand(t0));
3149  // Longer than original string's length or negative: unsafe arguments.
3150  __ Branch(&runtime, hi, a2, Operand(t0));
3151  // Shorter than original string's length: an actual substring.
3152 
3153  // Deal with different string types: update the index if necessary
3154  // and put the underlying string into t1.
3155  // v0: original string
3156  // a1: instance type
3157  // a2: length
3158  // a3: from index (untagged)
3159  Label underlying_unpacked, sliced_string, seq_or_external_string;
3160  // If the string is not indirect, it can only be sequential or external.
3163  __ And(t0, a1, Operand(kIsIndirectStringMask));
3164  __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
3165  // t0 is used as a scratch register and can be overwritten in either case.
3166  __ And(t0, a1, Operand(kSlicedNotConsMask));
3167  __ Branch(&sliced_string, ne, t0, Operand(zero_reg));
3168  // Cons string. Check whether it is flat, then fetch first part.
3170  __ LoadRoot(t0, Heap::kempty_stringRootIndex);
3171  __ Branch(&runtime, ne, t1, Operand(t0));
3173  // Update instance type.
3176  __ jmp(&underlying_unpacked);
3177 
3178  __ bind(&sliced_string);
3179  // Sliced string. Fetch parent and correct start index by offset.
3182  __ sra(t0, t0, 1); // Add offset to index.
3183  __ Addu(a3, a3, t0);
3184  // Update instance type.
3187  __ jmp(&underlying_unpacked);
3188 
3189  __ bind(&seq_or_external_string);
3190  // Sequential or external string. Just move string to the expected register.
3191  __ mov(t1, v0);
3192 
3193  __ bind(&underlying_unpacked);
3194 
3195  if (FLAG_string_slices) {
3196  Label copy_routine;
3197  // t1: underlying subject string
3198  // a1: instance type of underlying subject string
3199  // a2: length
3200  // a3: adjusted start index (untagged)
3201  // Short slice. Copy instead of slicing.
3202  __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
3203  // Allocate new sliced string. At this point we do not reload the instance
3204  // type including the string encoding because we simply rely on the info
3205  // provided by the original string. It does not matter if the original
3206  // string's encoding is wrong because we always have to recheck the encoding
3207  // of the newly created string's parent anyway due to externalized strings.
3208  Label two_byte_slice, set_slice_header;
3211  __ And(t0, a1, Operand(kStringEncodingMask));
3212  __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
3213  __ AllocateOneByteSlicedString(v0, a2, t2, t3, &runtime);
3214  __ jmp(&set_slice_header);
3215  __ bind(&two_byte_slice);
3216  __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
3217  __ bind(&set_slice_header);
3218  __ sll(a3, a3, 1);
3221  __ jmp(&return_v0);
3222 
3223  __ bind(&copy_routine);
3224  }
3225 
3226  // t1: underlying subject string
3227  // a1: instance type of underlying subject string
3228  // a2: length
3229  // a3: adjusted start index (untagged)
3230  Label two_byte_sequential, sequential_string, allocate_result;
3233  __ And(t0, a1, Operand(kExternalStringTag));
3234  __ Branch(&sequential_string, eq, t0, Operand(zero_reg));
3235 
3236  // Handle external string.
3237  // Rule out short external strings.
3239  __ And(t0, a1, Operand(kShortExternalStringTag));
3240  __ Branch(&runtime, ne, t0, Operand(zero_reg));
3242  // t1 already points to the first character of underlying string.
3243  __ jmp(&allocate_result);
3244 
3245  __ bind(&sequential_string);
3246  // Locate first character of underlying subject string.
3248  __ Addu(t1, t1, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3249 
3250  __ bind(&allocate_result);
3251  // Sequential ASCII string. Allocate the result.
3253  __ And(t0, a1, Operand(kStringEncodingMask));
3254  __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
3255 
 3256  // Allocate and copy the resulting one-byte string.
3257  __ AllocateOneByteString(v0, a2, t0, t2, t3, &runtime);
3258 
3259  // Locate first character of substring to copy.
3260  __ Addu(t1, t1, a3);
3261 
3262  // Locate first character of result.
3263  __ Addu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3264 
3265  // v0: result string
3266  // a1: first character of result string
3267  // a2: result string length
3268  // t1: first character of substring to copy
3271  masm, a1, t1, a2, a3, String::ONE_BYTE_ENCODING);
3272  __ jmp(&return_v0);
3273 
3274  // Allocate and copy the resulting two-byte string.
3275  __ bind(&two_byte_sequential);
3276  __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime);
3277 
3278  // Locate first character of substring to copy.
3279  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
3280  __ sll(t0, a3, 1);
3281  __ Addu(t1, t1, t0);
3282  // Locate first character of result.
3283  __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3284 
3285  // v0: result string.
3286  // a1: first character of result.
3287  // a2: result length.
3288  // t1: first character of substring to copy.
3291  masm, a1, t1, a2, a3, String::TWO_BYTE_ENCODING);
3292 
3293  __ bind(&return_v0);
3294  Counters* counters = isolate()->counters();
3295  __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
3296  __ DropAndRet(3);
3297 
3298  // Just jump to runtime to create the sub string.
3299  __ bind(&runtime);
3300  __ TailCallRuntime(Runtime::kSubString, 3, 1);
3301 
3302  __ bind(&single_char);
3303  // v0: original string
3304  // a1: instance type
3305  // a2: length
3306  // a3: from index (untagged)
3307  __ SmiTag(a3, a3);
3308  StringCharAtGenerator generator(
3309  v0, a3, a2, v0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
3310  generator.GenerateFast(masm);
3311  __ DropAndRet(3);
3312  generator.SkipSlow(masm, &runtime);
3313 }
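The stub above either materializes a sliced string that points back into the original string or copies the characters when the requested range is short. A minimal C++ sketch of that policy follows; the types and the kMinSliceLength constant are illustrative assumptions, not V8's definitions.

    #include <cstddef>
    #include <memory>
    #include <string>

    // Assumed stand-in for SlicedString::kMinLength.
    constexpr std::size_t kMinSliceLength = 13;

    struct Substring {
      std::shared_ptr<const std::string> parent;  // set when we slice
      std::size_t offset = 0;
      std::size_t length = 0;
      std::string copy;                           // set when we copy
      bool is_slice = false;
    };

    Substring MakeSubstring(const std::shared_ptr<const std::string>& parent,
                            std::size_t from, std::size_t length) {
      Substring result;
      result.length = length;
      if (length >= kMinSliceLength) {
        // Long enough: record (parent, offset, length) instead of copying.
        result.parent = parent;
        result.offset = from;
        result.is_slice = true;
      } else {
        // Short result: copying the characters is cheaper than keeping the
        // parent alive through a slice.
        result.copy = parent->substr(from, length);
      }
      return result;
    }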
3314 
3315 
 3316 void StringHelper::GenerateFlatOneByteStringEquals(
 3317     MacroAssembler* masm, Register left, Register right, Register scratch1,
3318  Register scratch2, Register scratch3) {
3319  Register length = scratch1;
3320 
3321  // Compare lengths.
3322  Label strings_not_equal, check_zero_length;
3323  __ lw(length, FieldMemOperand(left, String::kLengthOffset));
3324  __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
3325  __ Branch(&check_zero_length, eq, length, Operand(scratch2));
3326  __ bind(&strings_not_equal);
3327  DCHECK(is_int16(NOT_EQUAL));
3328  __ Ret(USE_DELAY_SLOT);
3329  __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
3330 
3331  // Check if the length is zero.
3332  Label compare_chars;
3333  __ bind(&check_zero_length);
3334  STATIC_ASSERT(kSmiTag == 0);
3335  __ Branch(&compare_chars, ne, length, Operand(zero_reg));
3336  DCHECK(is_int16(EQUAL));
3337  __ Ret(USE_DELAY_SLOT);
3338  __ li(v0, Operand(Smi::FromInt(EQUAL)));
3339 
3340  // Compare characters.
3341  __ bind(&compare_chars);
3342 
3343  GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
3344  v0, &strings_not_equal);
3345 
3346  // Characters are equal.
3347  __ Ret(USE_DELAY_SLOT);
3348  __ li(v0, Operand(Smi::FromInt(EQUAL)));
3349 }
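For reference, a plain C++ sketch of the equality check generated above: compare the lengths, short-circuit on zero length, then compare the raw one-byte characters. This is an illustration of the logic, not V8 code.

    #include <cstring>
    #include <string>

    // Returns true when the two flat one-byte strings are equal.
    bool FlatOneByteStringEquals(const std::string& left, const std::string& right) {
      if (left.size() != right.size()) return false;  // different lengths: NOT_EQUAL
      if (left.empty()) return true;                  // both empty: EQUAL
      return std::memcmp(left.data(), right.data(), left.size()) == 0;
    }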
3350 
3351 
 3352 void StringHelper::GenerateCompareFlatOneByteStrings(
 3353     MacroAssembler* masm, Register left, Register right, Register scratch1,
3354  Register scratch2, Register scratch3, Register scratch4) {
3355  Label result_not_equal, compare_lengths;
3356  // Find minimum length and length difference.
3357  __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
3358  __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
3359  __ Subu(scratch3, scratch1, Operand(scratch2));
3360  Register length_delta = scratch3;
3361  __ slt(scratch4, scratch2, scratch1);
3362  __ Movn(scratch1, scratch2, scratch4);
3363  Register min_length = scratch1;
3364  STATIC_ASSERT(kSmiTag == 0);
3365  __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
3366 
3367  // Compare loop.
3368  GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
3369  scratch4, v0, &result_not_equal);
3370 
3371  // Compare lengths - strings up to min-length are equal.
3372  __ bind(&compare_lengths);
3373  DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
3374  // Use length_delta as result if it's zero.
3375  __ mov(scratch2, length_delta);
3376  __ mov(scratch4, zero_reg);
3377  __ mov(v0, zero_reg);
3378 
3379  __ bind(&result_not_equal);
3380  // Conditionally update the result based either on length_delta or
 3381  // the last comparison performed in the loop above.
3382  Label ret;
3383  __ Branch(&ret, eq, scratch2, Operand(scratch4));
3384  __ li(v0, Operand(Smi::FromInt(GREATER)));
3385  __ Branch(&ret, gt, scratch2, Operand(scratch4));
3386  __ li(v0, Operand(Smi::FromInt(LESS)));
3387  __ bind(&ret);
3388  __ Ret();
3389 }
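The ordering logic above can be summarized in C++: compare character by character up to the shorter length, and if that prefix matches, let the length difference decide. A hedged sketch, not the stub itself:

    #include <algorithm>
    #include <cstddef>
    #include <string>

    // Returns -1 (LESS), 0 (EQUAL) or 1 (GREATER).
    int CompareFlatOneByteStrings(const std::string& left, const std::string& right) {
      const std::size_t min_length = std::min(left.size(), right.size());
      for (std::size_t i = 0; i < min_length; ++i) {
        const unsigned char l = static_cast<unsigned char>(left[i]);
        const unsigned char r = static_cast<unsigned char>(right[i]);
        if (l != r) return l < r ? -1 : 1;
      }
      // Equal up to min_length: the shorter string compares less.
      if (left.size() == right.size()) return 0;
      return left.size() < right.size() ? -1 : 1;
    }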
3390 
3391 
 3392 void StringHelper::GenerateOneByteCharsCompareLoop(
 3393     MacroAssembler* masm, Register left, Register right, Register length,
3394  Register scratch1, Register scratch2, Register scratch3,
3395  Label* chars_not_equal) {
3396  // Change index to run from -length to -1 by adding length to string
3397  // start. This means that loop ends when index reaches zero, which
3398  // doesn't need an additional compare.
3399  __ SmiUntag(length);
3400  __ Addu(scratch1, length,
3402  __ Addu(left, left, Operand(scratch1));
3403  __ Addu(right, right, Operand(scratch1));
3404  __ Subu(length, zero_reg, length);
3405  Register index = length; // index = -length;
3406 
3407 
3408  // Compare loop.
3409  Label loop;
3410  __ bind(&loop);
3411  __ Addu(scratch3, left, index);
3412  __ lbu(scratch1, MemOperand(scratch3));
3413  __ Addu(scratch3, right, index);
3414  __ lbu(scratch2, MemOperand(scratch3));
3415  __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
3416  __ Addu(index, index, 1);
3417  __ Branch(&loop, ne, index, Operand(zero_reg));
3418 }
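The loop above uses a common trick: bias both string pointers by the length and let the index run from -length up to zero, so the loop's termination test doubles as the bounds check. A small C++ sketch of the same idiom (illustrative only):

    #include <cstddef>

    bool OneByteCharsEqual(const unsigned char* left, const unsigned char* right,
                           std::ptrdiff_t length) {
      left += length;                  // point one past the last character
      right += length;
      std::ptrdiff_t index = -length;  // index reaches 0 exactly at the end
      while (index != 0) {
        if (left[index] != right[index]) return false;
        ++index;
      }
      return true;
    }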
3419 
3420 
3421 void StringCompareStub::Generate(MacroAssembler* masm) {
3422  Label runtime;
3423 
3424  Counters* counters = isolate()->counters();
3425 
3426  // Stack frame on entry.
3427  // sp[0]: right string
3428  // sp[4]: left string
3429  __ lw(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
3430  __ lw(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
3431 
3432  Label not_same;
3433  __ Branch(&not_same, ne, a0, Operand(a1));
3434  STATIC_ASSERT(EQUAL == 0);
3435  STATIC_ASSERT(kSmiTag == 0);
3436  __ li(v0, Operand(Smi::FromInt(EQUAL)));
3437  __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
3438  __ DropAndRet(2);
3439 
3440  __ bind(&not_same);
3441 
3442  // Check that both objects are sequential one-byte strings.
3443  __ JumpIfNotBothSequentialOneByteStrings(a1, a0, a2, a3, &runtime);
3444 
 3445  // Compare flat one-byte strings natively. Remove arguments from stack first.
3446  __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
3447  __ Addu(sp, sp, Operand(2 * kPointerSize));
3448  StringHelper::GenerateCompareFlatOneByteStrings(masm, a1, a0, a2, a3, t0, t1);
3449 
3450  __ bind(&runtime);
3451  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3452 }
3453 
3454 
3455 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
3456  // ----------- S t a t e -------------
3457  // -- a1 : left
3458  // -- a0 : right
3459  // -- ra : return address
3460  // -----------------------------------
3461 
3462  // Load a2 with the allocation site. We stick an undefined dummy value here
3463  // and replace it with the real allocation site later when we instantiate this
3464  // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
3465  __ li(a2, handle(isolate()->heap()->undefined_value()));
3466 
3467  // Make sure that we actually patched the allocation site.
3468  if (FLAG_debug_code) {
3469  __ And(at, a2, Operand(kSmiTagMask));
3470  __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
3472  __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
3473  __ Assert(eq, kExpectedAllocationSite, t0, Operand(at));
3474  }
3475 
3476  // Tail call into the stub that handles binary operations with allocation
3477  // sites.
3478  BinaryOpWithAllocationSiteStub stub(isolate(), state());
3479  __ TailCallStub(&stub);
3480 }
3481 
3482 
3483 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
3485  Label miss;
3486  __ Or(a2, a1, a0);
3487  __ JumpIfNotSmi(a2, &miss);
3488 
3489  if (GetCondition() == eq) {
3490  // For equality we do not care about the sign of the result.
3491  __ Ret(USE_DELAY_SLOT);
3492  __ Subu(v0, a0, a1);
3493  } else {
3494  // Untag before subtracting to avoid handling overflow.
3495  __ SmiUntag(a1);
3496  __ SmiUntag(a0);
3497  __ Ret(USE_DELAY_SLOT);
3498  __ Subu(v0, a1, a0);
3499  }
3500 
3501  __ bind(&miss);
3502  GenerateMiss(masm);
3503 }
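A hedged C++ sketch of the Smi fast path above. On 32-bit targets a Smi is a 31-bit integer shifted left by one; for equality only zero versus non-zero of the difference matters, so the tagged values can be subtracted directly, while for ordering the operands are untagged first so the subtraction cannot overflow. The helper names are illustrative.

    #include <cstdint>

    inline int32_t UntagSmi(int32_t tagged) { return tagged >> 1; }

    // Sign of the return value mirrors the stub's result register.
    int32_t CompareSmis(int32_t lhs_tagged, int32_t rhs_tagged, bool equality_only) {
      if (equality_only) {
        // Only zero vs. non-zero matters; do the subtraction in unsigned
        // arithmetic where wraparound is well defined.
        return static_cast<int32_t>(static_cast<uint32_t>(lhs_tagged) -
                                    static_cast<uint32_t>(rhs_tagged));
      }
      // Untag first: the difference of two 31-bit values always fits in 32 bits.
      return UntagSmi(lhs_tagged) - UntagSmi(rhs_tagged);
    }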
3504 
3505 
3506 void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
3508 
3509  Label generic_stub;
3510  Label unordered, maybe_undefined1, maybe_undefined2;
3511  Label miss;
3512 
3513  if (left() == CompareICState::SMI) {
3514  __ JumpIfNotSmi(a1, &miss);
3515  }
3516  if (right() == CompareICState::SMI) {
3517  __ JumpIfNotSmi(a0, &miss);
3518  }
3519 
3520  // Inlining the double comparison and falling back to the general compare
3521  // stub if NaN is involved.
3522  // Load left and right operand.
3523  Label done, left, left_smi, right_smi;
3524  __ JumpIfSmi(a0, &right_smi);
3525  __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
3527  __ Subu(a2, a0, Operand(kHeapObjectTag));
3529  __ Branch(&left);
3530  __ bind(&right_smi);
3531  __ SmiUntag(a2, a0); // Can't clobber a0 yet.
3532  FPURegister single_scratch = f6;
3533  __ mtc1(a2, single_scratch);
3534  __ cvt_d_w(f2, single_scratch);
3535 
3536  __ bind(&left);
3537  __ JumpIfSmi(a1, &left_smi);
3538  __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
3540  __ Subu(a2, a1, Operand(kHeapObjectTag));
3542  __ Branch(&done);
3543  __ bind(&left_smi);
3544  __ SmiUntag(a2, a1); // Can't clobber a1 yet.
3545  single_scratch = f8;
3546  __ mtc1(a2, single_scratch);
3547  __ cvt_d_w(f0, single_scratch);
3548 
3549  __ bind(&done);
3550 
3551  // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
3552  Label fpu_eq, fpu_lt;
3553  // Test if equal, and also handle the unordered/NaN case.
3554  __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
3555 
3556  // Test if less (unordered case is already handled).
3557  __ BranchF(&fpu_lt, NULL, lt, f0, f2);
3558 
 3559  // Otherwise it's greater, so just fall through and return.
3560  DCHECK(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
3561  __ Ret(USE_DELAY_SLOT);
3562  __ li(v0, Operand(GREATER));
3563 
3564  __ bind(&fpu_eq);
3565  __ Ret(USE_DELAY_SLOT);
3566  __ li(v0, Operand(EQUAL));
3567 
3568  __ bind(&fpu_lt);
3569  __ Ret(USE_DELAY_SLOT);
3570  __ li(v0, Operand(LESS));
3571 
3572  __ bind(&unordered);
3573  __ bind(&generic_stub);
3574  CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
3576  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
3577 
3578  __ bind(&maybe_undefined1);
3580  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3581  __ Branch(&miss, ne, a0, Operand(at));
3582  __ JumpIfSmi(a1, &unordered);
3583  __ GetObjectType(a1, a2, a2);
3584  __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
3585  __ jmp(&unordered);
3586  }
3587 
3588  __ bind(&maybe_undefined2);
3590  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3591  __ Branch(&unordered, eq, a1, Operand(at));
3592  }
3593 
3594  __ bind(&miss);
3595  GenerateMiss(masm);
3596 }
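The double comparison above reduces to: if either operand is NaN, take the unordered path (which falls back to the generic stub); otherwise return LESS, EQUAL or GREATER. A plain C++ sketch, not V8 API:

    #include <cmath>

    // Returns -1/0/1; sets *unordered when either operand is NaN.
    int CompareDoubles(double lhs, double rhs, bool* unordered) {
      *unordered = std::isnan(lhs) || std::isnan(rhs);
      if (*unordered) return 0;   // caller must dispatch to the generic stub
      if (lhs == rhs) return 0;   // EQUAL
      return lhs < rhs ? -1 : 1;  // LESS / GREATER
    }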
3597 
3598 
3599 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
3601  Label miss;
3602 
3603  // Registers containing left and right operands respectively.
3604  Register left = a1;
3605  Register right = a0;
3606  Register tmp1 = a2;
3607  Register tmp2 = a3;
3608 
3609  // Check that both operands are heap objects.
3610  __ JumpIfEitherSmi(left, right, &miss);
3611 
3612  // Check that both operands are internalized strings.
3615  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3616  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3618  __ Or(tmp1, tmp1, Operand(tmp2));
3619  __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3620  __ Branch(&miss, ne, at, Operand(zero_reg));
3621 
3622  // Make sure a0 is non-zero. At this point input operands are
3623  // guaranteed to be non-zero.
3624  DCHECK(right.is(a0));
3625  STATIC_ASSERT(EQUAL == 0);
3626  STATIC_ASSERT(kSmiTag == 0);
3627  __ mov(v0, right);
3628  // Internalized strings are compared by identity.
3629  __ Ret(ne, left, Operand(right));
3630  DCHECK(is_int16(EQUAL));
3631  __ Ret(USE_DELAY_SLOT);
3632  __ li(v0, Operand(Smi::FromInt(EQUAL)));
3633 
3634  __ bind(&miss);
3635  GenerateMiss(masm);
3636 }
3637 
3638 
3639 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
3641  DCHECK(GetCondition() == eq);
3642  Label miss;
3643 
3644  // Registers containing left and right operands respectively.
3645  Register left = a1;
3646  Register right = a0;
3647  Register tmp1 = a2;
3648  Register tmp2 = a3;
3649 
3650  // Check that both operands are heap objects.
3651  __ JumpIfEitherSmi(left, right, &miss);
3652 
3653  // Check that both operands are unique names. This leaves the instance
3654  // types loaded in tmp1 and tmp2.
3657  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3658  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3659 
3660  __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
3661  __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
3662 
3663  // Use a0 as result
3664  __ mov(v0, a0);
3665 
3666  // Unique names are compared by identity.
3667  Label done;
3668  __ Branch(&done, ne, left, Operand(right));
3669  // Make sure a0 is non-zero. At this point input operands are
3670  // guaranteed to be non-zero.
3671  DCHECK(right.is(a0));
3672  STATIC_ASSERT(EQUAL == 0);
3673  STATIC_ASSERT(kSmiTag == 0);
3674  __ li(v0, Operand(Smi::FromInt(EQUAL)));
3675  __ bind(&done);
3676  __ Ret();
3677 
3678  __ bind(&miss);
3679  GenerateMiss(masm);
3680 }
3681 
3682 
3683 void CompareICStub::GenerateStrings(MacroAssembler* masm) {
3685  Label miss;
3686 
3687  bool equality = Token::IsEqualityOp(op());
3688 
3689  // Registers containing left and right operands respectively.
3690  Register left = a1;
3691  Register right = a0;
3692  Register tmp1 = a2;
3693  Register tmp2 = a3;
3694  Register tmp3 = t0;
3695  Register tmp4 = t1;
3696  Register tmp5 = t2;
3697 
3698  // Check that both operands are heap objects.
3699  __ JumpIfEitherSmi(left, right, &miss);
3700 
3701  // Check that both operands are strings. This leaves the instance
3702  // types loaded in tmp1 and tmp2.
3705  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3706  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3708  __ Or(tmp3, tmp1, tmp2);
3709  __ And(tmp5, tmp3, Operand(kIsNotStringMask));
3710  __ Branch(&miss, ne, tmp5, Operand(zero_reg));
3711 
3712  // Fast check for identical strings.
3713  Label left_ne_right;
3714  STATIC_ASSERT(EQUAL == 0);
3715  STATIC_ASSERT(kSmiTag == 0);
3716  __ Branch(&left_ne_right, ne, left, Operand(right));
3717  __ Ret(USE_DELAY_SLOT);
3718  __ mov(v0, zero_reg); // In the delay slot.
3719  __ bind(&left_ne_right);
3720 
3721  // Handle not identical strings.
3722 
3723  // Check that both strings are internalized strings. If they are, we're done
3724  // because we already know they are not identical. We know they are both
3725  // strings.
3726  if (equality) {
3727  DCHECK(GetCondition() == eq);
3729  __ Or(tmp3, tmp1, Operand(tmp2));
3730  __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
3731  Label is_symbol;
3732  __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
3733  // Make sure a0 is non-zero. At this point input operands are
3734  // guaranteed to be non-zero.
3735  DCHECK(right.is(a0));
3736  __ Ret(USE_DELAY_SLOT);
3737  __ mov(v0, a0); // In the delay slot.
3738  __ bind(&is_symbol);
3739  }
3740 
3741  // Check that both strings are sequential one-byte.
3742  Label runtime;
3743  __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
3744  &runtime);
3745 
3746  // Compare flat one-byte strings. Returns when done.
3747  if (equality) {
3749  tmp3);
3750  } else {
3752  tmp2, tmp3, tmp4);
3753  }
3754 
3755  // Handle more complex cases in runtime.
3756  __ bind(&runtime);
3757  __ Push(left, right);
3758  if (equality) {
3759  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
3760  } else {
3761  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3762  }
3763 
3764  __ bind(&miss);
3765  GenerateMiss(masm);
3766 }
3767 
3768 
3769 void CompareICStub::GenerateObjects(MacroAssembler* masm) {
3771  Label miss;
3772  __ And(a2, a1, Operand(a0));
3773  __ JumpIfSmi(a2, &miss);
3774 
3775  __ GetObjectType(a0, a2, a2);
3776  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
3777  __ GetObjectType(a1, a2, a2);
3778  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
3779 
3780  DCHECK(GetCondition() == eq);
3781  __ Ret(USE_DELAY_SLOT);
3782  __ subu(v0, a0, a1);
3783 
3784  __ bind(&miss);
3785  GenerateMiss(masm);
3786 }
3787 
3788 
3789 void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
3790  Label miss;
3791  __ And(a2, a1, a0);
3792  __ JumpIfSmi(a2, &miss);
3795  __ Branch(&miss, ne, a2, Operand(known_map_));
3796  __ Branch(&miss, ne, a3, Operand(known_map_));
3797 
3798  __ Ret(USE_DELAY_SLOT);
3799  __ subu(v0, a0, a1);
3800 
3801  __ bind(&miss);
3802  GenerateMiss(masm);
3803 }
3804 
3805 
3806 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
3807  {
3808  // Call the runtime system in a fresh internal frame.
3809  ExternalReference miss =
3810  ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
3811  FrameScope scope(masm, StackFrame::INTERNAL);
3812  __ Push(a1, a0);
3813  __ Push(ra, a1, a0);
3814  __ li(t0, Operand(Smi::FromInt(op())));
3815  __ addiu(sp, sp, -kPointerSize);
3816  __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
3817  __ sw(t0, MemOperand(sp)); // In the delay slot.
3818  // Compute the entry point of the rewritten stub.
3819  __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
3820  // Restore registers.
3821  __ Pop(a1, a0, ra);
3822  }
3823  __ Jump(a2);
3824 }
3825 
3826 
3827 void DirectCEntryStub::Generate(MacroAssembler* masm) {
 3828  // Make room for the arguments to fit the C calling convention. Most callers
 3829  // of DirectCEntryStub::GenerateCall use EnterExitFrame/LeaveExitFrame, so
 3830  // they handle restoring the stack and we don't have to do that here.
 3831  // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
 3832  // kCArgsSlotsSize stack space after the call.
3833  __ Subu(sp, sp, Operand(kCArgsSlotsSize));
3834  // Place the return address on the stack, making the call
3835  // GC safe. The RegExp backend also relies on this.
3836  __ sw(ra, MemOperand(sp, kCArgsSlotsSize));
3837  __ Call(t9); // Call the C++ function.
3838  __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
3839 
3840  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
3841  // In case of an error the return address may point to a memory area
3842  // filled with kZapValue by the GC.
3843  // Dereference the address and check for this.
3844  __ lw(t0, MemOperand(t9));
3845  __ Assert(ne, kReceivedInvalidReturnAddress, t0,
3846  Operand(reinterpret_cast<uint32_t>(kZapValue)));
3847  }
3848  __ Jump(t9);
3849 }
3850 
3851 
3852 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
3853  Register target) {
3854  intptr_t loc =
3855  reinterpret_cast<intptr_t>(GetCode().location());
3856  __ Move(t9, target);
3857  __ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
3858  __ Call(ra);
3859 }
3860 
3861 
3862 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
3863  Label* miss,
3864  Label* done,
3865  Register receiver,
3866  Register properties,
3867  Handle<Name> name,
3868  Register scratch0) {
3869  DCHECK(name->IsUniqueName());
 3870  // If the names of the slots probed for this hash value (probes 1 through
 3871  // kProbes - 1) are not equal to the name and the kProbes-th slot is unused
 3872  // (its name is the undefined value), the hash table is guaranteed not to
 3873  // contain the property. This holds even if some slots hold deleted
 3874  // properties (their names are the hole value).
3875  for (int i = 0; i < kInlinedProbes; i++) {
3876  // scratch0 points to properties hash.
3877  // Compute the masked index: (hash + i + i * i) & mask.
3878  Register index = scratch0;
3879  // Capacity is smi 2^n.
3880  __ lw(index, FieldMemOperand(properties, kCapacityOffset));
3881  __ Subu(index, index, Operand(1));
3882  __ And(index, index, Operand(
3883  Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
3884 
3885  // Scale the index by multiplying by the entry size.
3887  __ sll(at, index, 1);
3888  __ Addu(index, index, at);
3889 
3890  Register entity_name = scratch0;
3891  // Having undefined at this place means the name is not contained.
3892  DCHECK_EQ(kSmiTagSize, 1);
3893  Register tmp = properties;
3894  __ sll(scratch0, index, 1);
3895  __ Addu(tmp, properties, scratch0);
3896  __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
3897 
3898  DCHECK(!tmp.is(entity_name));
3899  __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
3900  __ Branch(done, eq, entity_name, Operand(tmp));
3901 
3902  // Load the hole ready for use below:
3903  __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
3904 
3905  // Stop if found the property.
3906  __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));
3907 
3908  Label good;
3909  __ Branch(&good, eq, entity_name, Operand(tmp));
3910 
3911  // Check if the entry name is not a unique name.
3912  __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
3913  __ lbu(entity_name,
3915  __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
3916  __ bind(&good);
3917 
3918  // Restore the properties.
3919  __ lw(properties,
3921  }
3922 
3923  const int spill_mask =
3924  (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
3925  a2.bit() | a1.bit() | a0.bit() | v0.bit());
3926 
3927  __ MultiPush(spill_mask);
3928  __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
3929  __ li(a1, Operand(Handle<Name>(name)));
3930  NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
3931  __ CallStub(&stub);
3932  __ mov(at, v0);
3933  __ MultiPop(spill_mask);
3934 
3935  __ Branch(done, eq, at, Operand(zero_reg));
3936  __ Branch(miss, ne, at, Operand(zero_reg));
3937 }
3938 
3939 
3940 // Probe the name dictionary in the |elements| register. Jump to the
3941 // |done| label if a property with the given name is found. Jump to
3942 // the |miss| label otherwise.
3943 // If lookup was successful |scratch2| will be equal to elements + 4 * index.
3944 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
3945  Label* miss,
3946  Label* done,
3947  Register elements,
3948  Register name,
3949  Register scratch1,
3950  Register scratch2) {
3951  DCHECK(!elements.is(scratch1));
3952  DCHECK(!elements.is(scratch2));
3953  DCHECK(!name.is(scratch1));
3954  DCHECK(!name.is(scratch2));
3955 
3956  __ AssertName(name);
3957 
3958  // Compute the capacity mask.
3959  __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
3960  __ sra(scratch1, scratch1, kSmiTagSize); // convert smi to int
3961  __ Subu(scratch1, scratch1, Operand(1));
3962 
3963  // Generate an unrolled loop that performs a few probes before
3964  // giving up. Measurements done on Gmail indicate that 2 probes
3965  // cover ~93% of loads from dictionaries.
3966  for (int i = 0; i < kInlinedProbes; i++) {
3967  // Compute the masked index: (hash + i + i * i) & mask.
3969  if (i > 0) {
3970  // Add the probe offset (i + i * i) left shifted to avoid right shifting
3971  // the hash in a separate instruction. The value hash + i + i * i is right
3972  // shifted in the following and instruction.
3973  DCHECK(NameDictionary::GetProbeOffset(i) <
3974  1 << (32 - Name::kHashFieldOffset));
3975  __ Addu(scratch2, scratch2, Operand(
3976  NameDictionary::GetProbeOffset(i) << Name::kHashShift));
3977  }
3978  __ srl(scratch2, scratch2, Name::kHashShift);
3979  __ And(scratch2, scratch1, scratch2);
3980 
3981  // Scale the index by multiplying by the element size.
3983  // scratch2 = scratch2 * 3.
3984 
3985  __ sll(at, scratch2, 1);
3986  __ Addu(scratch2, scratch2, at);
3987 
3988  // Check if the key is identical to the name.
3989  __ sll(at, scratch2, 2);
3990  __ Addu(scratch2, elements, at);
3991  __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
3992  __ Branch(done, eq, name, Operand(at));
3993  }
3994 
3995  const int spill_mask =
3996  (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
3997  a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
3998  ~(scratch1.bit() | scratch2.bit());
3999 
4000  __ MultiPush(spill_mask);
4001  if (name.is(a0)) {
4002  DCHECK(!elements.is(a1));
4003  __ Move(a1, name);
4004  __ Move(a0, elements);
4005  } else {
4006  __ Move(a0, elements);
4007  __ Move(a1, name);
4008  }
4009  NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
4010  __ CallStub(&stub);
4011  __ mov(scratch2, a2);
4012  __ mov(at, v0);
4013  __ MultiPop(spill_mask);
4014 
4015  __ Branch(done, ne, at, Operand(zero_reg));
4016  __ Branch(miss, eq, at, Operand(zero_reg));
4017 }
4018 
4019 
4020 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
4021  // This stub overrides SometimesSetsUpAFrame() to return false. That means
4022  // we cannot call anything that could cause a GC from this stub.
 4023  // Registers:
 4024  //  result: holds the lookup result (v0); might alias with index.
 4025  //  key: the name being looked up (a1).
 4026  //  dictionary: the NameDictionary to probe (a0).
 4027  //  index: will hold the index of the entry if the lookup is successful;
 4028  //         might alias with result.
 4029  // Returns:
 4030  //  result is zero if the lookup failed, non-zero otherwise.
4031 
4032  Register result = v0;
4033  Register dictionary = a0;
4034  Register key = a1;
4035  Register index = a2;
4036  Register mask = a3;
4037  Register hash = t0;
4038  Register undefined = t1;
4039  Register entry_key = t2;
4040 
4041  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
4042 
4044  __ sra(mask, mask, kSmiTagSize);
4045  __ Subu(mask, mask, Operand(1));
4046 
4047  __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
4048 
4049  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
4050 
4051  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
4052  // Compute the masked index: (hash + i + i * i) & mask.
4053  // Capacity is smi 2^n.
4054  if (i > 0) {
4055  // Add the probe offset (i + i * i) left shifted to avoid right shifting
4056  // the hash in a separate instruction. The value hash + i + i * i is right
4057  // shifted in the following and instruction.
4058  DCHECK(NameDictionary::GetProbeOffset(i) <
4059  1 << (32 - Name::kHashFieldOffset));
4060  __ Addu(index, hash, Operand(
4061  NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4062  } else {
4063  __ mov(index, hash);
4064  }
4065  __ srl(index, index, Name::kHashShift);
4066  __ And(index, mask, index);
4067 
4068  // Scale the index by multiplying by the entry size.
4070  // index *= 3.
4071  __ mov(at, index);
4072  __ sll(index, index, 1);
4073  __ Addu(index, index, at);
4074 
4075 
4076  DCHECK_EQ(kSmiTagSize, 1);
4077  __ sll(index, index, 2);
4078  __ Addu(index, index, dictionary);
4079  __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
4080 
4081  // Having undefined at this place means the name is not contained.
4082  __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
4083 
4084  // Stop if found the property.
4085  __ Branch(&in_dictionary, eq, entry_key, Operand(key));
4086 
4087  if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
4088  // Check if the entry name is not a unique name.
4089  __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
4090  __ lbu(entry_key,
4092  __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
4093  }
4094  }
4095 
4096  __ bind(&maybe_in_dictionary);
4097  // If we are doing negative lookup then probing failure should be
4098  // treated as a lookup success. For positive lookup probing failure
4099  // should be treated as lookup failure.
4100  if (mode() == POSITIVE_LOOKUP) {
4101  __ Ret(USE_DELAY_SLOT);
4102  __ mov(result, zero_reg);
4103  }
4104 
4105  __ bind(&in_dictionary);
4106  __ Ret(USE_DELAY_SLOT);
4107  __ li(result, 1);
4108 
4109  __ bind(&not_in_dictionary);
4110  __ Ret(USE_DELAY_SLOT);
4111  __ mov(result, zero_reg);
4112 }
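Both lookup paths above generate the same probe sequence: quadratic probing of a power-of-two table, with the resulting entry index scaled by the entry size (three words per entry). A hedged C++ sketch of that index computation; the constant and names are assumptions for illustration:

    #include <cstdint>

    constexpr uint32_t kEntrySize = 3;  // assumed NameDictionary entry width

    // Element index of the i-th probe for |hash| in a table whose capacity is a
    // power of two: ((hash + i + i*i) & (capacity - 1)) * kEntrySize.
    uint32_t ProbeElementIndex(uint32_t hash, uint32_t capacity, uint32_t i) {
      const uint32_t mask = capacity - 1;
      const uint32_t probe_offset = i + i * i;      // GetProbeOffset(i)
      const uint32_t entry = (hash + probe_offset) & mask;
      return entry * kEntrySize;
    }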
4113 
4114 
 4115 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
 4116     Isolate* isolate) {
4117  StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
4118  stub1.GetCode();
4119  // Hydrogen code stubs need stub2 at snapshot time.
4120  StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
4121  stub2.GetCode();
4122 }
4123 
4124 
4125 // Takes the input in 3 registers: address_ value_ and object_. A pointer to
4126 // the value has just been written into the object, now this stub makes sure
4127 // we keep the GC informed. The word in the object where the value has been
4128 // written is in the address register.
4129 void RecordWriteStub::Generate(MacroAssembler* masm) {
4130  Label skip_to_incremental_noncompacting;
4131  Label skip_to_incremental_compacting;
4132 
 4133  // The first two branch+nop instructions are generated with labels so that
 4134  // the offsets get fixed up correctly by the bind(Label*) calls. We patch
 4135  // them back and forth between a "bne zero_reg, zero_reg, ..." (a nop in
 4136  // this position) and a "beq zero_reg, zero_reg, ..." when we start and
 4137  // stop incremental heap marking.
4138  // See RecordWriteStub::Patch for details.
4139  __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
4140  __ nop();
4141  __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
4142  __ nop();
4143 
4145  __ RememberedSetHelper(object(),
4146  address(),
4147  value(),
4150  }
4151  __ Ret();
4152 
4153  __ bind(&skip_to_incremental_noncompacting);
4155 
4156  __ bind(&skip_to_incremental_compacting);
4158 
4159  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
4160  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
4161 
4162  PatchBranchIntoNop(masm, 0);
4164 }
4165 
4166 
4167 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
4168  regs_.Save(masm);
4169 
4171  Label dont_need_remembered_set;
4172 
4173  __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
4174  __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
4175  regs_.scratch0(),
4176  &dont_need_remembered_set);
4177 
4178  __ CheckPageFlag(regs_.object(),
4179  regs_.scratch0(),
4181  ne,
4182  &dont_need_remembered_set);
4183 
4184  // First notify the incremental marker if necessary, then update the
4185  // remembered set.
4189  regs_.Restore(masm);
4190  __ RememberedSetHelper(object(),
4191  address(),
4192  value(),
4195 
4196  __ bind(&dont_need_remembered_set);
4197  }
4198 
4202  regs_.Restore(masm);
4203  __ Ret();
4204 }
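A heavily hedged sketch of the policy RecordWriteStub implements: after a pointer store, notify the incremental marker when marking is active, and record the slot in the remembered set only when it may hold an old-to-new pointer. The predicates below are trivial placeholders, not V8's page-flag checks.

    #include <vector>

    struct HeapObject {};  // opaque stand-in for a heap object

    // Placeholder predicates/hooks; the real stub inspects page flags and the
    // incremental-marking state instead.
    bool InNewSpace(const HeapObject*) { return false; }
    bool IncrementalMarkingActive() { return false; }
    void InformIncrementalMarker(HeapObject*) {}

    std::vector<HeapObject**> g_store_buffer;  // plays the role of the remembered set

    void RecordWrite(HeapObject* object, HeapObject** slot, HeapObject* value) {
      if (IncrementalMarkingActive()) {
        InformIncrementalMarker(value);      // keep the marker's worklist up to date
      }
      if (InNewSpace(value) && !InNewSpace(object)) {
        g_store_buffer.push_back(slot);      // old-to-new pointer: remember the slot
      }
    }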
4205 
4206 
4207 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
4209  int argument_count = 3;
4210  __ PrepareCallCFunction(argument_count, regs_.scratch0());
4211  Register address =
4212  a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
4213  DCHECK(!address.is(regs_.object()));
4214  DCHECK(!address.is(a0));
4215  __ Move(address, regs_.address());
4216  __ Move(a0, regs_.object());
4217  __ Move(a1, address);
4218  __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
4219 
4220  AllowExternalCallThatCantCauseGC scope(masm);
4221  __ CallCFunction(
4222  ExternalReference::incremental_marking_record_write_function(isolate()),
4223  argument_count);
4225 }
4226 
4227 
 4228 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
 4229     MacroAssembler* masm,
4230  OnNoNeedToInformIncrementalMarker on_no_need,
4231  Mode mode) {
4232  Label on_black;
4233  Label need_incremental;
4234  Label need_incremental_pop_scratch;
4235 
4236  __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
4237  __ lw(regs_.scratch1(),
4240  __ Subu(regs_.scratch1(), regs_.scratch1(), Operand(1));
4241  __ sw(regs_.scratch1(),
4244  __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
4245 
4246  // Let's look at the color of the object: If it is not black we don't have
4247  // to inform the incremental marker.
4248  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
4249 
4250  regs_.Restore(masm);
4252  __ RememberedSetHelper(object(),
4253  address(),
4254  value(),
4257  } else {
4258  __ Ret();
4259  }
4260 
4261  __ bind(&on_black);
4262 
4263  // Get the value from the slot.
4264  __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
4265 
4266  if (mode == INCREMENTAL_COMPACTION) {
4267  Label ensure_not_white;
4268 
4269  __ CheckPageFlag(regs_.scratch0(), // Contains value.
4270  regs_.scratch1(), // Scratch.
4272  eq,
4273  &ensure_not_white);
4274 
4275  __ CheckPageFlag(regs_.object(),
4276  regs_.scratch1(), // Scratch.
4278  eq,
4279  &need_incremental);
4280 
4281  __ bind(&ensure_not_white);
4282  }
4283 
4284  // We need extra registers for this, so we push the object and the address
4285  // register temporarily.
4286  __ Push(regs_.object(), regs_.address());
4287  __ EnsureNotWhite(regs_.scratch0(), // The value.
4288  regs_.scratch1(), // Scratch.
4289  regs_.object(), // Scratch.
4290  regs_.address(), // Scratch.
4291  &need_incremental_pop_scratch);
4292  __ Pop(regs_.object(), regs_.address());
4293 
4294  regs_.Restore(masm);
4296  __ RememberedSetHelper(object(),
4297  address(),
4298  value(),
4301  } else {
4302  __ Ret();
4303  }
4304 
4305  __ bind(&need_incremental_pop_scratch);
4306  __ Pop(regs_.object(), regs_.address());
4307 
4308  __ bind(&need_incremental);
4309 
4310  // Fall through when we need to inform the incremental marker.
4311 }
4312 
4313 
4314 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
4315  // ----------- S t a t e -------------
4316  // -- a0 : element value to store
4317  // -- a3 : element index as smi
4318  // -- sp[0] : array literal index in function as smi
4319  // -- sp[4] : array literal
4320  // clobbers a1, a2, t0
4321  // -----------------------------------
4322 
4323  Label element_done;
4324  Label double_elements;
4325  Label smi_element;
4326  Label slow_elements;
4327  Label fast_elements;
4328 
4329  // Get array literal index, array literal and its map.
4330  __ lw(t0, MemOperand(sp, 0 * kPointerSize));
4331  __ lw(a1, MemOperand(sp, 1 * kPointerSize));
4333 
4334  __ CheckFastElements(a2, t1, &double_elements);
4335  // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
4336  __ JumpIfSmi(a0, &smi_element);
4337  __ CheckFastSmiElements(a2, t1, &fast_elements);
4338 
 4339  // Storing into the array literal requires an elements transition. Call into
 4340  // the runtime.
 4341  __ bind(&slow_elements);
 4342  // Push the arguments for the runtime call.
4343  __ Push(a1, a3, a0);
4346  __ Push(t1, t0);
4347  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
4348 
4349  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
4350  __ bind(&fast_elements);
4352  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
4353  __ Addu(t2, t1, t2);
4354  __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4355  __ sw(a0, MemOperand(t2, 0));
4356  // Update the write barrier for the array store.
4357  __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
4359  __ Ret(USE_DELAY_SLOT);
4360  __ mov(v0, a0);
4361 
4362  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
4363  // and value is Smi.
4364  __ bind(&smi_element);
4366  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
4367  __ Addu(t2, t1, t2);
4369  __ Ret(USE_DELAY_SLOT);
4370  __ mov(v0, a0);
4371 
4372  // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
4373  __ bind(&double_elements);
4375  __ StoreNumberToDoubleElements(a0, a3, t1, t3, t5, a2, &slow_elements);
4376  __ Ret(USE_DELAY_SLOT);
4377  __ mov(v0, a0);
4378 }
4379 
4380 
4381 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
4382  CEntryStub ces(isolate(), 1, kSaveFPRegs);
4383  __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
4384  int parameter_count_offset =
4386  __ lw(a1, MemOperand(fp, parameter_count_offset));
4388  __ Addu(a1, a1, Operand(1));
4389  }
4390  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
4391  __ sll(a1, a1, kPointerSizeLog2);
4392  __ Ret(USE_DELAY_SLOT);
4393  __ Addu(sp, sp, a1);
4394 }
4395 
4396 
4397 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
4398  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
4399  VectorLoadStub stub(isolate(), state());
4400  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4401 }
4402 
4403 
4404 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
4405  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
4406  VectorKeyedLoadStub stub(isolate());
4407  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4408 }
4409 
4410 
4411 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
4412  if (masm->isolate()->function_entry_hook() != NULL) {
4413  ProfileEntryHookStub stub(masm->isolate());
4414  __ push(ra);
4415  __ CallStub(&stub);
4416  __ pop(ra);
4417  }
4418 }
4419 
4420 
4421 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
4422  // The entry hook is a "push ra" instruction, followed by a call.
 4423  // Note: on MIPS a "push" takes two instructions.
4424  const int32_t kReturnAddressDistanceFromFunctionStart =
4426 
4427  // This should contain all kJSCallerSaved registers.
4428  const RegList kSavedRegs =
4429  kJSCallerSaved | // Caller saved registers.
4430  s5.bit(); // Saved stack pointer.
4431 
4432  // We also save ra, so the count here is one higher than the mask indicates.
4433  const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
4434 
4435  // Save all caller-save registers as this may be called from anywhere.
4436  __ MultiPush(kSavedRegs | ra.bit());
4437 
4438  // Compute the function's address for the first argument.
4439  __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
4440 
4441  // The caller's return address is above the saved temporaries.
4442  // Grab that for the second argument to the hook.
4443  __ Addu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
4444 
4445  // Align the stack if necessary.
4446  int frame_alignment = masm->ActivationFrameAlignment();
4447  if (frame_alignment > kPointerSize) {
4448  __ mov(s5, sp);
4449  DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4450  __ And(sp, sp, Operand(-frame_alignment));
4451  }
4452  __ Subu(sp, sp, kCArgsSlotsSize);
4453 #if defined(V8_HOST_ARCH_MIPS)
4454  int32_t entry_hook =
4455  reinterpret_cast<int32_t>(isolate()->function_entry_hook());
4456  __ li(t9, Operand(entry_hook));
4457 #else
4458  // Under the simulator we need to indirect the entry hook through a
4459  // trampoline function at a known address.
4460  // It additionally takes an isolate as a third parameter.
4461  __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
4462 
4463  ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
4464  __ li(t9, Operand(ExternalReference(&dispatcher,
4465  ExternalReference::BUILTIN_CALL,
4466  isolate())));
4467 #endif
4468  // Call C function through t9 to conform ABI for PIC.
4469  __ Call(t9);
4470 
4471  // Restore the stack pointer if needed.
4472  if (frame_alignment > kPointerSize) {
4473  __ mov(sp, s5);
4474  } else {
4475  __ Addu(sp, sp, kCArgsSlotsSize);
4476  }
4477 
4478  // Also pop ra to get Ret(0).
4479  __ MultiPop(kSavedRegs | ra.bit());
4480  __ Ret();
4481 }
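The alignment step above ("And(sp, sp, Operand(-frame_alignment))") is the usual round-down-to-a-power-of-two trick; in C++:

    #include <cassert>
    #include <cstdint>

    // Rounds |sp| down to a multiple of |alignment| (which must be a power of two).
    uintptr_t AlignDown(uintptr_t sp, uintptr_t alignment) {
      assert((alignment & (alignment - 1)) == 0);
      return sp & ~(alignment - 1);  // equivalent to sp & -alignment
    }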
4482 
4483 
4484 template<class T>
4485 static void CreateArrayDispatch(MacroAssembler* masm,
4487  if (mode == DISABLE_ALLOCATION_SITES) {
4488  T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
4489  __ TailCallStub(&stub);
4490  } else if (mode == DONT_OVERRIDE) {
4491  int last_index = GetSequenceIndexFromFastElementsKind(
4493  for (int i = 0; i <= last_index; ++i) {
4495  T stub(masm->isolate(), kind);
4496  __ TailCallStub(&stub, eq, a3, Operand(kind));
4497  }
4498 
4499  // If we reached this point there is a problem.
4500  __ Abort(kUnexpectedElementsKindInArrayConstructor);
4501  } else {
4502  UNREACHABLE();
4503  }
4504 }
4505 
4506 
4507 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
4509  // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
4510  // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
4511  // a0 - number of arguments
4512  // a1 - constructor?
4513  // sp[0] - last argument
4514  Label normal_sequence;
4515  if (mode == DONT_OVERRIDE) {
4516  DCHECK(FAST_SMI_ELEMENTS == 0);
4518  DCHECK(FAST_ELEMENTS == 2);
4522 
4523  // is the low bit set? If so, we are holey and that is good.
4524  __ And(at, a3, Operand(1));
4525  __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
4526  }
4527 
4528  // look at the first argument
4529  __ lw(t1, MemOperand(sp, 0));
4530  __ Branch(&normal_sequence, eq, t1, Operand(zero_reg));
4531 
4532  if (mode == DISABLE_ALLOCATION_SITES) {
4534  ElementsKind holey_initial = GetHoleyElementsKind(initial);
4535 
4536  ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
4537  holey_initial,
4539  __ TailCallStub(&stub_holey);
4540 
4541  __ bind(&normal_sequence);
4542  ArraySingleArgumentConstructorStub stub(masm->isolate(),
4543  initial,
4545  __ TailCallStub(&stub);
4546  } else if (mode == DONT_OVERRIDE) {
4547  // We are going to create a holey array, but our kind is non-holey.
4548  // Fix kind and retry (only if we have an allocation site in the slot).
4549  __ Addu(a3, a3, Operand(1));
4550 
4551  if (FLAG_debug_code) {
4552  __ lw(t1, FieldMemOperand(a2, 0));
4553  __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
4554  __ Assert(eq, kExpectedAllocationSite, t1, Operand(at));
4555  }
4556 
4557  // Save the resulting elements kind in type info. We can't just store a3
4558  // in the AllocationSite::transition_info field because elements kind is
 4559  // restricted to a portion of the field; the upper bits need to be left alone.
4562  __ Addu(t0, t0, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
4564 
4565 
4566  __ bind(&normal_sequence);
4567  int last_index = GetSequenceIndexFromFastElementsKind(
4569  for (int i = 0; i <= last_index; ++i) {
4571  ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
4572  __ TailCallStub(&stub, eq, a3, Operand(kind));
4573  }
4574 
4575  // If we reached this point there is a problem.
4576  __ Abort(kUnexpectedElementsKindInArrayConstructor);
4577  } else {
4578  UNREACHABLE();
4579  }
4580 }
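The "__ Addu(a3, a3, Operand(1))" fix-up above relies on the fast ElementsKind sequence interleaving packed and holey kinds, so the holey variant of a packed kind is simply kind + 1. A sketch under that assumption (the packed values mirror the DCHECKs in the stub; the holey values are assumed):

    enum ElementsKind {
      FAST_SMI_ELEMENTS = 0,
      FAST_HOLEY_SMI_ELEMENTS = 1,
      FAST_ELEMENTS = 2,
      FAST_HOLEY_ELEMENTS = 3
    };

    // Low bit set means the kind is already holey; otherwise step to kind + 1.
    ElementsKind GetHoleyElementsKind(ElementsKind kind) {
      if (kind & 1) return kind;
      return static_cast<ElementsKind>(kind + 1);
    }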
4581 
4582 
4583 template<class T>
4584 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
4585  int to_index = GetSequenceIndexFromFastElementsKind(
4587  for (int i = 0; i <= to_index; ++i) {
4589  T stub(isolate, kind);
4590  stub.GetCode();
4592  T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
4593  stub1.GetCode();
4594  }
4595  }
4596 }
4597 
4598 
 4599 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
 4600  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
4601  isolate);
4602  ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
4603  isolate);
4604  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
4605  isolate);
4606 }
4607 
4608 
 4609 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
 4610     Isolate* isolate) {
4612  for (int i = 0; i < 2; i++) {
4613  // For internal arrays we only need a few things.
4614  InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
4615  stubh1.GetCode();
4616  InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
4617  stubh2.GetCode();
4618  InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
4619  stubh3.GetCode();
4620  }
4621 }
4622 
4623 
 4624 void ArrayConstructorStub::GenerateDispatchToArrayStub(
 4625     MacroAssembler* masm,
 4626     AllocationSiteOverrideMode mode) {
 4627  if (argument_count() == ANY) {
4628  Label not_zero_case, not_one_case;
4629  __ And(at, a0, a0);
4630  __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
4631  CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4632 
4633  __ bind(&not_zero_case);
4634  __ Branch(&not_one_case, gt, a0, Operand(1));
4635  CreateArrayDispatchOneArgument(masm, mode);
4636 
4637  __ bind(&not_one_case);
4638  CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4639  } else if (argument_count() == NONE) {
4640  CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4641  } else if (argument_count() == ONE) {
4642  CreateArrayDispatchOneArgument(masm, mode);
4643  } else if (argument_count() == MORE_THAN_ONE) {
4644  CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4645  } else {
4646  UNREACHABLE();
4647  }
4648 }
4649 
4650 
4651 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
4652  // ----------- S t a t e -------------
4653  // -- a0 : argc (only if argument_count() == ANY)
4654  // -- a1 : constructor
4655  // -- a2 : AllocationSite or undefined
4656  // -- sp[0] : return address
4657  // -- sp[4] : last argument
4658  // -----------------------------------
4659 
4660  if (FLAG_debug_code) {
4661  // The array construct code is only set for the global and natives
4662  // builtin Array functions which always have maps.
4663 
4664  // Initial map for the builtin Array function should be a map.
 4666  // The SmiTst below catches both a NULL and a Smi.
4667  __ SmiTst(t0, at);
4668  __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
4669  at, Operand(zero_reg));
4670  __ GetObjectType(t0, t0, t1);
4671  __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
4672  t1, Operand(MAP_TYPE));
4673 
4674  // We should either have undefined in a2 or a valid AllocationSite
4675  __ AssertUndefinedOrAllocationSite(a2, t0);
4676  }
4677 
4678  Label no_info;
4679  // Get the elements kind and case on that.
4680  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4681  __ Branch(&no_info, eq, a2, Operand(at));
4682 
4684  __ SmiUntag(a3);
4686  __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
4688 
4689  __ bind(&no_info);
4691 }
4692 
4693 
 4694 void InternalArrayConstructorStub::GenerateCase(
 4695     MacroAssembler* masm, ElementsKind kind) {
4696 
4697  InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
4698  __ TailCallStub(&stub0, lo, a0, Operand(1));
4699 
4700  InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
4701  __ TailCallStub(&stubN, hi, a0, Operand(1));
4702 
4703  if (IsFastPackedElementsKind(kind)) {
4704  // We might need to create a holey array
4705  // look at the first argument.
4706  __ lw(at, MemOperand(sp, 0));
4707 
4708  InternalArraySingleArgumentConstructorStub
4709  stub1_holey(isolate(), GetHoleyElementsKind(kind));
4710  __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
4711  }
4712 
4713  InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
4714  __ TailCallStub(&stub1);
4715 }
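GenerateCase above dispatches on the argument count, and for a single argument it switches a packed kind to its holey variant when a non-zero length is requested ("new Array(n)" creates holes). A hedged C++ sketch of that decision:

    #include <cstddef>

    enum class ArrayVariant {
      kNoArgument,
      kSingleArgument,
      kSingleArgumentHoley,
      kNArguments
    };

    ArrayVariant SelectArrayConstructor(std::size_t argc, int first_arg,
                                        bool kind_is_packed) {
      if (argc == 0) return ArrayVariant::kNoArgument;
      if (argc > 1) return ArrayVariant::kNArguments;
      // One argument: a non-zero requested length produces holes.
      if (kind_is_packed && first_arg != 0) return ArrayVariant::kSingleArgumentHoley;
      return ArrayVariant::kSingleArgument;
    }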
4716 
4717 
4718 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
4719  // ----------- S t a t e -------------
4720  // -- a0 : argc
4721  // -- a1 : constructor
4722  // -- sp[0] : return address
4723  // -- sp[4] : last argument
4724  // -----------------------------------
4725 
4726  if (FLAG_debug_code) {
4727  // The array construct code is only set for the global and natives
4728  // builtin Array functions which always have maps.
4729 
4730  // Initial map for the builtin Array function should be a map.
 4732  // The SmiTst below catches both a NULL and a Smi.
4733  __ SmiTst(a3, at);
4734  __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
4735  at, Operand(zero_reg));
4736  __ GetObjectType(a3, a3, t0);
4737  __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
4738  t0, Operand(MAP_TYPE));
4739  }
4740 
4741  // Figure out the right elements kind.
4743 
4744  // Load the map's "bit field 2" into a3. We only need the first byte,
4745  // but the following bit field extraction takes care of that anyway.
4747  // Retrieve elements_kind from bit field 2.
4748  __ DecodeField<Map::ElementsKindBits>(a3);
4749 
4750  if (FLAG_debug_code) {
4751  Label done;
4752  __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
4753  __ Assert(
4754  eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray,
4755  a3, Operand(FAST_HOLEY_ELEMENTS));
4756  __ bind(&done);
4757  }
4758 
4759  Label fast_elements_case;
4760  __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
4762 
4763  __ bind(&fast_elements_case);
4764  GenerateCase(masm, FAST_ELEMENTS);
4765 }
4766 
4767 
4768 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
4769  // ----------- S t a t e -------------
4770  // -- a0 : callee
4771  // -- t0 : call_data
4772  // -- a2 : holder
4773  // -- a1 : api_function_address
4774  // -- cp : context
4775  // --
4776  // -- sp[0] : last argument
4777  // -- ...
4778  // -- sp[(argc - 1)* 4] : first argument
4779  // -- sp[argc * 4] : receiver
4780  // -----------------------------------
4781 
4782  Register callee = a0;
4783  Register call_data = t0;
4784  Register holder = a2;
4785  Register api_function_address = a1;
4786  Register context = cp;
4787 
4788  int argc = this->argc();
4789  bool is_store = this->is_store();
4791 
4792  typedef FunctionCallbackArguments FCA;
4793 
4794  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
4795  STATIC_ASSERT(FCA::kCalleeIndex == 5);
4796  STATIC_ASSERT(FCA::kDataIndex == 4);
4797  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
4798  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
4799  STATIC_ASSERT(FCA::kIsolateIndex == 1);
4800  STATIC_ASSERT(FCA::kHolderIndex == 0);
4801  STATIC_ASSERT(FCA::kArgsLength == 7);
4802 
4803  // Save context, callee and call data.
4804  __ Push(context, callee, call_data);
4805  // Load context from callee.
4806  __ lw(context, FieldMemOperand(callee, JSFunction::kContextOffset));
4807 
4808  Register scratch = call_data;
4809  if (!call_data_undefined) {
4810  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4811  }
4812  // Push return value and default return value.
4813  __ Push(scratch, scratch);
4814  __ li(scratch,
4815  Operand(ExternalReference::isolate_address(isolate())));
4816  // Push isolate and holder.
4817  __ Push(scratch, holder);
4818 
4819  // Prepare arguments.
4820  __ mov(scratch, sp);
4821 
4822  // Allocate the v8::Arguments structure in the arguments' space since
4823  // it's not controlled by GC.
4824  const int kApiStackSpace = 4;
4825 
4826  FrameScope frame_scope(masm, StackFrame::MANUAL);
4827  __ EnterExitFrame(false, kApiStackSpace);
4828 
4829  DCHECK(!api_function_address.is(a0) && !scratch.is(a0));
4830  // a0 = FunctionCallbackInfo&
4831  // Arguments is after the return address.
4832  __ Addu(a0, sp, Operand(1 * kPointerSize));
4833  // FunctionCallbackInfo::implicit_args_
4834  __ sw(scratch, MemOperand(a0, 0 * kPointerSize));
4835  // FunctionCallbackInfo::values_
4836  __ Addu(at, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
4837  __ sw(at, MemOperand(a0, 1 * kPointerSize));
4838  // FunctionCallbackInfo::length_ = argc
4839  __ li(at, Operand(argc));
4840  __ sw(at, MemOperand(a0, 2 * kPointerSize));
4841  // FunctionCallbackInfo::is_construct_call = 0
4842  __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
4843 
4844  const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
4845  ExternalReference thunk_ref =
4846  ExternalReference::invoke_function_callback(isolate());
4847 
4848  AllowExternalCallThatCantCauseGC scope(masm);
4849  MemOperand context_restore_operand(
4850  fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
4851  // Stores return the first js argument.
4852  int return_value_offset = 0;
4853  if (is_store) {
4854  return_value_offset = 2 + FCA::kArgsLength;
4855  } else {
4856  return_value_offset = 2 + FCA::kReturnValueOffset;
4857  }
4858  MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
4859 
4860  __ CallApiFunctionAndReturn(api_function_address,
4861  thunk_ref,
4862  kStackUnwindSpace,
4863  return_value_operand,
4864  &context_restore_operand);
4865 }
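The four words the stub writes at a0 mirror the fields of FunctionCallbackInfo: the start of the implicit-argument block, a pointer to the first JS argument, the argument count, and the is_construct_call flag. A descriptive sketch of that layout (field names are illustrative, not V8's declarations):

    #include <cstdint>

    struct FunctionCallbackInfoWords {
      void* implicit_args;        // sw(scratch, MemOperand(a0, 0)): the FCA block
      void* values;               // implicit_args + (kArgsLength - 1 + argc) words
      int32_t length;             // argc
      int32_t is_construct_call;  // written as zero by this stub
    };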
4866 
4867 
4868 void CallApiGetterStub::Generate(MacroAssembler* masm) {
4869  // ----------- S t a t e -------------
4870  // -- sp[0] : name
4871  // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
4872  // -- ...
4873  // -- a2 : api_function_address
4874  // -----------------------------------
4875 
4876  Register api_function_address = ApiGetterDescriptor::function_address();
4877  DCHECK(api_function_address.is(a2));
4878 
4879  __ mov(a0, sp); // a0 = Handle<Name>
4880  __ Addu(a1, a0, Operand(1 * kPointerSize)); // a1 = PCA
4881 
4882  const int kApiStackSpace = 1;
4883  FrameScope frame_scope(masm, StackFrame::MANUAL);
4884  __ EnterExitFrame(false, kApiStackSpace);
4885 
4886  // Create PropertyAccessorInfo instance on the stack above the exit frame with
4887  // a1 (internal::Object** args_) as the data.
4888  __ sw(a1, MemOperand(sp, 1 * kPointerSize));
4889  __ Addu(a1, sp, Operand(1 * kPointerSize)); // a1 = AccessorInfo&
4890 
4891  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
4892 
4893  ExternalReference thunk_ref =
4894  ExternalReference::invoke_accessor_getter_callback(isolate());
4895  __ CallApiFunctionAndReturn(api_function_address,
4896  thunk_ref,
4897  kStackUnwindSpace,
4898  MemOperand(fp, 6 * kPointerSize),
4899  NULL);
4900 }
4901 
4902 
4903 #undef __
4904 
4905 } } // namespace v8::internal
4906 
4907 #endif // V8_TARGET_ARCH_MIPS
static const intptr_t kPageAlignmentMask
Definition: spaces.h:757
virtual void Generate(MacroAssembler *masm)=0
ProfileEntryHookStub(Isolate *isolate)
Definition: code-stubs.h:2373
static void MaybeCallEntryHook(MacroAssembler *masm)
static void EntryHookTrampoline(intptr_t function, intptr_t stack_pointer, Isolate *isolate)
Definition: code-stubs.cc:925
void SaveCallerSaveRegisters(MacroAssembler *masm, SaveFPRegsMode mode)
void RestoreCallerSaveRegisters(MacroAssembler *masm, SaveFPRegsMode mode)
void GenerateIncremental(MacroAssembler *masm, Mode mode)
void InformIncrementalMarker(MacroAssembler *masm)
RememberedSetAction remembered_set_action() const
static void PatchBranchIntoNop(MacroAssembler *masm, int pos)
SaveFPRegsMode save_fp_regs_mode() const
void CheckNeedsToInformIncrementalMarker(MacroAssembler *masm, OnNoNeedToInformIncrementalMarker on_no_need, Mode mode)
virtual void Generate(MacroAssembler *masm) OVERRIDE
static const int kLastCaptureCountOffset
Definition: jsregexp.h:168
static const int kLastSubjectOffset
Definition: jsregexp.h:170
static const int kLastMatchOverhead
Definition: jsregexp.h:165
static const int kLastInputOffset
Definition: jsregexp.h:172
static const int kFirstCaptureOffset
Definition: jsregexp.h:174
static void GenerateAheadOfTime(Isolate *isolate)
static const Function * FunctionForId(FunctionId id)
Definition: runtime.cc:9312
static const int kHeaderSize
Definition: objects.h:8941
static const int kConstructStubOffset
Definition: objects.h:6896
static const int kFeedbackVectorOffset
Definition: objects.h:6904
static const int kCompilerHintsOffset
Definition: objects.h:6961
static const int kMinLength
Definition: objects.h:9109
static const int kParentOffset
Definition: objects.h:9104
static const int kOffsetOffset
Definition: objects.h:9105
static Smi * FromInt(int value)
Definition: objects-inl.h:1321
static const int kContextOffset
Definition: frames.h:162
static const int kCallerSPOffset
Definition: frames.h:167
static const int kCallerFPOffset
Definition: frames.h:165
static void GenerateFixedRegStubsAheadOfTime(Isolate *isolate)
StoreBufferOverflowStub(Isolate *isolate, SaveFPRegsMode save_fp)
Definition: code-stubs.h:2395
static void GenerateAheadOfTime(Isolate *isolate)
void GenerateFast(MacroAssembler *masm)
void GenerateSlow(MacroAssembler *masm, const RuntimeCallHelper &call_helper)
void GenerateFast(MacroAssembler *masm)
void GenerateSlow(MacroAssembler *masm, const RuntimeCallHelper &call_helper)
static void Generate(MacroAssembler *masm, Register string, Register index, Register result, Label *call_runtime)
static void GenerateOneByteCharsCompareLoop(MacroAssembler *masm, Register left, Register right, Register length, Register scratch1, Register scratch2, Label *chars_not_equal)
static void GenerateCompareFlatOneByteStrings(MacroAssembler *masm, Register left, Register right, Register scratch1, Register scratch2, Register scratch3, Register scratch4)
static void GenerateCopyCharacters(MacroAssembler *masm, Register dest, Register src, Register count, Register scratch, String::Encoding encoding)
static void GenerateFlatOneByteStringEquals(MacroAssembler *masm, Register left, Register right, Register scratch1, Register scratch2, Register scratch3)
static const int32_t kMaxOneByteCharCode
Definition: objects.h:8811
static const int kLengthOffset
Definition: objects.h:8802
static const int kCallerStackParameterCountFrameOffset
Definition: frames.h:755
StubFunctionMode function_mode() const
Definition: code-stubs.h:2360
static void GenerateAheadOfTime(Isolate *isolate)
Definition: code-stubs.cc:917
static bool IsOrderedRelationalCompareOp(Value op)
Definition: token.h:206
static bool IsEqualityOp(Value op)
Definition: token.h:210
static Handle< Object > UninitializedSentinel(Isolate *isolate)
static Handle< Object > MegamorphicSentinel(Isolate *isolate)
static const Register VectorRegister()
void Generate(MacroAssembler *masm)
static void GenerateFixedRegStubsAheadOfTime(Isolate *isolate)
WriteInt32ToHeapNumberStub(Isolate *isolate, Register the_int, Register the_heap_number, Register scratch)
@ kMips32r6
#define IsMipsArchVariant(check)
#define __
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf map
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be expose gc extension under the specified name show built in functions in stack traces use random jit cookie to mask large constants minimum length for automatic enable preparsing CPU profiler sampling interval in microseconds trace out of bounds accesses to external arrays default size of stack region v8 is allowed to maximum length of function source code printed in a stack trace min size of a semi the new space consists of two semi spaces print one trace line following each garbage collection do not print trace line after scavenger collection print cumulative GC statistics in name
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_BOOL(enable_always_align_csp
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be NULL
#define V8_INFINITY
Definition: globals.h:25
#define FUNCTION_ADDR(f)
Definition: globals.h:195
#define UNREACHABLE()
Definition: logging.h:30
#define DCHECK(condition)
Definition: logging.h:205
#define DCHECK_EQ(v1, v2)
Definition: logging.h:206
@ JUMP_FUNCTION
@ CALL_FUNCTION
AllocationFlags
@ SIZE_IN_WORDS
@ TAG_OBJECT
int int32_t
Definition: unicode.cc:24
bool IsPowerOfTwo32(uint32_t value)
Definition: bits.h:77
static int Push(SpecialRPOStackFrame *stack, int depth, BasicBlock *child, int unvisited)
Definition: scheduler.cc:773
const int kPointerSize
Definition: globals.h:129
const FPURegister f14
const uint32_t kStringEncodingMask
Definition: objects.h:555
@ DONT_DO_SMI_CHECK
Definition: globals.h:640
const FPUControlRegister FCSR
ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_number)
@ DONT_TRACK_ALLOCATION_SITE
Definition: objects.h:8084
@ kSeqStringTag
Definition: objects.h:563
@ kConsStringTag
Definition: objects.h:564
@ kSlicedStringTag
Definition: objects.h:566
@ kExternalStringTag
Definition: objects.h:565
const RegList kJSCallerSaved
Definition: frames-arm.h:24
const Register cp
@ kCheckForInexactConversion
const intptr_t kPointerAlignmentMask
Definition: globals.h:231
const intptr_t kSmiSignMask
Definition: globals.h:223
const uint32_t kTwoByteStringTag
Definition: objects.h:556
const SwVfpRegister s1
const uint32_t kShortExternalStringTag
Definition: objects.h:590
const RegList kCalleeSaved
Definition: frames-arm.h:38
const int kSmiTagSize
Definition: v8.h:5743
const int kFastElementsKindPackedToHoley
Definition: elements-kind.h:71
const SwVfpRegister s2
const int kDoubleSize
Definition: globals.h:127
const FPURegister f2
const uint32_t kNotStringTag
Definition: objects.h:545
const SwVfpRegister s0
const Register fp
DwVfpRegister DoubleRegister
const uint32_t kFCSRUnderflowFlagMask
const FPURegister f4
const Address kZapValue
Definition: globals.h:269
@ JS_FUNCTION_STUB_MODE
Definition: code-stubs.h:350
const FPURegister f6
const Register sp
const int kPointerSizeLog2
Definition: globals.h:147
const uint32_t kStringTag
Definition: objects.h:544
@ JS_REGEXP_TYPE
Definition: objects.h:748
@ JS_ARRAY_TYPE
Definition: objects.h:738
@ FIXED_ARRAY_TYPE
Definition: objects.h:717
@ JS_OBJECT_TYPE
Definition: objects.h:731
@ FIRST_NONSTRING_TYPE
Definition: objects.h:758
@ ODDBALL_TYPE
Definition: objects.h:663
@ FIRST_SPEC_OBJECT_TYPE
Definition: objects.h:781
@ LAST_SPEC_OBJECT_TYPE
Definition: objects.h:782
@ HEAP_NUMBER_TYPE
Definition: objects.h:669
@ JS_FUNCTION_TYPE
Definition: objects.h:749
@ JS_FUNCTION_PROXY_TYPE
Definition: objects.h:726
@ FAST_HOLEY_DOUBLE_ELEMENTS
Definition: elements-kind.h:27
@ TERMINAL_FAST_ELEMENTS_KIND
Definition: elements-kind.h:63
@ FAST_HOLEY_SMI_ELEMENTS
Definition: elements-kind.h:17
Handle< T > handle(T *t, Isolate *isolate)
Definition: handles.h:146
const uint32_t kOneByteStringTag
Definition: objects.h:557
MemOperand FieldMemOperand(Register object, int offset)
const intptr_t kObjectAlignmentMask
Definition: globals.h:227
int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind)
bool IsFastPackedElementsKind(ElementsKind kind)
const FPURegister f12
const bool FLAG_enable_slow_asserts
Definition: checks.h:31
const uint32_t kShortExternalStringMask
Definition: objects.h:589
const uint32_t kFCSRInvalidOpFlagMask
ElementsKind GetHoleyElementsKind(ElementsKind packed_kind)
AllocationSiteOverrideMode
Definition: code-stubs.h:716
@ DISABLE_ALLOCATION_SITES
Definition: code-stubs.h:718
const uint32_t kStringRepresentationMask
Definition: objects.h:561
uint32_t RegList
Definition: frames.h:18
byte * Address
Definition: globals.h:101
const uint32_t kFCSROverflowFlagMask
const uint32_t kSlicedNotConsMask
Definition: objects.h:579
const int kCArgsSlotsSize
const int kHeapObjectTag
Definition: v8.h:5737
const int kSmiShiftSize
Definition: v8.h:5805
const Register no_reg
const uint32_t kInternalizedTag
Definition: objects.h:551
const int kNumJSCallerSaved
Definition: frames-arm.h:30
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
const intptr_t kSmiTagMask
Definition: v8.h:5744
const uint32_t kIsNotInternalizedMask
Definition: objects.h:549
const int kSmiTag
Definition: v8.h:5742
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2=no_reg, Register reg3=no_reg, Register reg4=no_reg, Register reg5=no_reg, Register reg6=no_reg)
const uint32_t kIsNotStringMask
Definition: objects.h:543
const int kNumCalleeSavedFPU
Definition: frames-mips.h:64
const int kNumCalleeSaved
Definition: frames-arm.h:58
ElementsKind GetInitialFastElementsKind()
Definition: elements-kind.h:78
const RegList kCalleeSavedFPU
Definition: frames-mips.h:56
@ STRING_INDEX_IS_NUMBER
Definition: code-stubs.h:1590
@ STRING_INDEX_IS_ARRAY_INDEX
Definition: code-stubs.h:1595
const uint32_t kIsIndirectStringMask
Definition: objects.h:568
const FPURegister f0
const SwVfpRegister s5
const FPURegister f8
const RegList kCallerSavedFPU
Definition: frames-mips.h:66
Debugger support for the V8 JavaScript engine.
Definition: accessors.cc:20
static Handle< Value > Throw(Isolate *isolate, const char *message)
Definition: d8.cc:72
bool is(Register reg) const
#define T(name, string, precedence)
Definition: token.cc:25