code-stubs-arm.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/v8.h"
6 
7 #if V8_TARGET_ARCH_ARM
8 
9 #include "src/base/bits.h"
10 #include "src/bootstrapper.h"
11 #include "src/code-stubs.h"
12 #include "src/codegen.h"
13 #include "src/ic/handler-compiler.h"
14 #include "src/ic/ic.h"
15 #include "src/isolate.h"
16 #include "src/jsregexp.h"
17 #include "src/regexp-macro-assembler.h"
18 #include "src/runtime/runtime.h"
19 
20 namespace v8 {
21 namespace internal {
22 
23 
24 static void InitializeArrayConstructorDescriptor(
25  Isolate* isolate, CodeStubDescriptor* descriptor,
26  int constant_stack_parameter_count) {
27  Address deopt_handler = Runtime::FunctionForId(
28  Runtime::kArrayConstructor)->entry;
29 
30  if (constant_stack_parameter_count == 0) {
31  descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
32  JS_FUNCTION_STUB_MODE);
33  } else {
34  descriptor->Initialize(r0, deopt_handler, constant_stack_parameter_count,
35  JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
36  }
37 }
38 
39 
40 static void InitializeInternalArrayConstructorDescriptor(
41  Isolate* isolate, CodeStubDescriptor* descriptor,
42  int constant_stack_parameter_count) {
43  Address deopt_handler = Runtime::FunctionForId(
44  Runtime::kInternalArrayConstructor)->entry;
45 
46  if (constant_stack_parameter_count == 0) {
47  descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
48  JS_FUNCTION_STUB_MODE);
49  } else {
50  descriptor->Initialize(r0, deopt_handler, constant_stack_parameter_count,
51  JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
52  }
53 }
54 
55 
56 void ArrayNoArgumentConstructorStub::InitializeDescriptor(
57  CodeStubDescriptor* descriptor) {
58  InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
59 }
60 
61 
62 void ArraySingleArgumentConstructorStub::InitializeDescriptor(
63  CodeStubDescriptor* descriptor) {
64  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
65 }
66 
67 
68 void ArrayNArgumentsConstructorStub::InitializeDescriptor(
69  CodeStubDescriptor* descriptor) {
70  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
71 }
72 
73 
74 void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
75  CodeStubDescriptor* descriptor) {
76  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
77 }
78 
79 
80 void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
81  CodeStubDescriptor* descriptor) {
82  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
83 }
84 
85 
86 void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
87  CodeStubDescriptor* descriptor) {
88  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
89 }
90 
91 
92 #define __ ACCESS_MASM(masm)
93 
94 
95 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
96  Label* slow,
97  Condition cond);
98 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
99  Register lhs,
100  Register rhs,
101  Label* lhs_not_nan,
102  Label* slow,
103  bool strict);
104 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
105  Register lhs,
106  Register rhs);
107 
108 
109 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
110  ExternalReference miss) {
111  // Update the static counter each time a new code stub is generated.
112  isolate()->counters()->code_stubs()->Increment();
113 
114  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
115  int param_count = descriptor.GetEnvironmentParameterCount();
116  {
117  // Call the runtime system in a fresh internal frame.
118  FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
119  DCHECK(param_count == 0 ||
120  r0.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
121  // Push arguments
122  for (int i = 0; i < param_count; ++i) {
123  __ push(descriptor.GetEnvironmentParameterRegister(i));
124  }
125  __ CallExternalReference(miss, param_count);
126  }
127 
128  __ Ret();
129 }
130 
131 
132 void DoubleToIStub::Generate(MacroAssembler* masm) {
133  Label out_of_range, only_low, negate, done;
134  Register input_reg = source();
135  Register result_reg = destination();
136  DCHECK(is_truncating());
137 
138  int double_offset = offset();
139  // Account for saved regs if input is sp.
140  if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
141 
142  Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
143  Register scratch_low =
144  GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
145  Register scratch_high =
146  GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
147  LowDwVfpRegister double_scratch = kScratchDoubleReg;
148 
149  __ Push(scratch_high, scratch_low, scratch);
150 
151  if (!skip_fastpath()) {
152  // Load double input.
153  __ vldr(double_scratch, MemOperand(input_reg, double_offset));
154  __ vmov(scratch_low, scratch_high, double_scratch);
155 
156  // Do fast-path convert from double to int.
157  __ vcvt_s32_f64(double_scratch.low(), double_scratch);
158  __ vmov(result_reg, double_scratch.low());
159 
160  // If result is not saturated (0x7fffffff or 0x80000000), we are done.
161  __ sub(scratch, result_reg, Operand(1));
162  __ cmp(scratch, Operand(0x7ffffffe));
163  __ b(lt, &done);
164  } else {
165  // We've already done MacroAssembler::TryFastTruncatedDoubleToILoad, so we
166  // know exponent > 31, so we can skip the vcvt_s32_f64 which will saturate.
167  if (double_offset == 0) {
168  __ ldm(ia, input_reg, scratch_low.bit() | scratch_high.bit());
169  } else {
170  __ ldr(scratch_low, MemOperand(input_reg, double_offset));
171  __ ldr(scratch_high, MemOperand(input_reg, double_offset + kIntSize));
172  }
173  }
174 
175  __ Ubfx(scratch, scratch_high,
176  HeapNumber::kExponentShift, HeapNumber::kExponentBits);
177  // Load scratch with exponent - 1. This is faster than loading
178  // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
179  STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
180  __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
181  // If exponent is greater than or equal to 84, the 32 less significant
182  // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits),
183  // the result is 0.
184  // Compare exponent with 84 (compare exponent - 1 with 83).
185  __ cmp(scratch, Operand(83));
186  __ b(ge, &out_of_range);
187 
188  // If we reach this code, 31 <= exponent <= 83.
189  // So, we don't have to handle cases where 0 <= exponent <= 20 for
190  // which we would need to shift right the high part of the mantissa.
191  // Scratch contains exponent - 1.
192  // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
193  __ rsb(scratch, scratch, Operand(51), SetCC);
194  __ b(ls, &only_low);
195  // 21 <= exponent <= 51, shift scratch_low and scratch_high
196  // to generate the result.
197  __ mov(scratch_low, Operand(scratch_low, LSR, scratch));
198  // Scratch contains: 52 - exponent.
199  // We need: exponent - 20.
200  // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
201  __ rsb(scratch, scratch, Operand(32));
202  __ Ubfx(result_reg, scratch_high,
203  0, HeapNumber::kMantissaBitsInTopWord);
204  // Set the implicit 1 before the mantissa part in scratch_high.
205  __ orr(result_reg, result_reg,
206  Operand(1 << HeapNumber::kMantissaBitsInTopWord));
207  __ orr(result_reg, scratch_low, Operand(result_reg, LSL, scratch));
208  __ b(&negate);
209 
210  __ bind(&out_of_range);
211  __ mov(result_reg, Operand::Zero());
212  __ b(&done);
213 
214  __ bind(&only_low);
215  // 52 <= exponent <= 83, shift only scratch_low.
216  // On entry, scratch contains: 52 - exponent.
217  __ rsb(scratch, scratch, Operand::Zero());
218  __ mov(result_reg, Operand(scratch_low, LSL, scratch));
219 
220  __ bind(&negate);
221  // If input was positive, scratch_high ASR 31 equals 0 and
222  // scratch_high LSR 31 equals zero.
223  // New result = (result eor 0) + 0 = result.
224  // If the input was negative, we have to negate the result.
225  // Input_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
226  // New result = (result eor 0xffffffff) + 1 = 0 - result.
227  __ eor(result_reg, result_reg, Operand(scratch_high, ASR, 31));
228  __ add(result_reg, result_reg, Operand(scratch_high, LSR, 31));
229 
230  __ bind(&done);
231 
232  __ Pop(scratch_high, scratch_low, scratch);
233  __ Ret();
234 }
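
The slow path above is easier to follow next to a portable equivalent. The following is an illustrative C++ sketch, not part of the original file and with an invented helper name, of the same truncation: extract the exponent, shift the mantissa into the low 32 result bits, then negate with the same eor/add identity. It assumes IEEE-754 doubles.

#include <cstdint>
#include <cstring>

// Hypothetical helper mirroring the stub's slow path, for illustration only.
int32_t TruncateDoubleToInt32(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof bits);  // scratch_high:scratch_low
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
  if (exponent < 0) return 0;   // |input| < 1
  if (exponent > 83) return 0;  // the 32 less significant bits are all zero
  uint64_t mantissa = (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
  uint32_t result = (exponent >= 52)
      ? static_cast<uint32_t>(mantissa << (exponent - 52))  // "only_low" case
      : static_cast<uint32_t>(mantissa >> (52 - exponent));
  if (bits >> 63) result = ~result + 1;  // the eor/add negation trick
  return static_cast<int32_t>(result);
}
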
235 
236 
237 void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
238  Isolate* isolate) {
239  WriteInt32ToHeapNumberStub stub1(isolate, r1, r0, r2);
240  WriteInt32ToHeapNumberStub stub2(isolate, r2, r0, r3);
241  stub1.GetCode();
242  stub2.GetCode();
243 }
244 
245 
246 // See comment for class.
247 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
248  Label max_negative_int;
249  // the_int_ has the answer which is a signed int32 but not a Smi.
250  // We test for the special value that has a different exponent. This test
251  // has the neat side effect of setting the flags according to the sign.
252  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
253  __ cmp(the_int(), Operand(0x80000000u));
254  __ b(eq, &max_negative_int);
255  // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
256  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
257  uint32_t non_smi_exponent =
258  (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
259  __ mov(scratch(), Operand(non_smi_exponent));
260  // Set the sign bit in scratch_ if the value was negative.
261  __ orr(scratch(), scratch(), Operand(HeapNumber::kSignMask), LeaveCC, cs);
262  // Subtract from 0 if the value was negative.
263  __ rsb(the_int(), the_int(), Operand::Zero(), LeaveCC, cs);
264  // We should be masking the implicit first digit of the mantissa away here,
265  // but it just ends up combining harmlessly with the last digit of the
266  // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
267  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
268  DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
269  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
270  __ orr(scratch(), scratch(), Operand(the_int(), LSR, shift_distance));
271  __ str(scratch(),
272  FieldMemOperand(the_heap_number(), HeapNumber::kExponentOffset));
273  __ mov(scratch(), Operand(the_int(), LSL, 32 - shift_distance));
274  __ str(scratch(),
275  FieldMemOperand(the_heap_number(), HeapNumber::kMantissaOffset));
276  __ Ret();
277 
278  __ bind(&max_negative_int);
279  // The max negative int32 is stored as a positive number in the mantissa of
280  // a double because it uses a sign bit instead of using two's complement.
281  // The actual mantissa bits stored are all 0 because the implicit most
282  // significant 1 bit is not stored.
283  non_smi_exponent += 1 << HeapNumber::kExponentShift;
284  __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
285  __ str(ip, FieldMemOperand(the_heap_number(), HeapNumber::kExponentOffset));
286  __ mov(ip, Operand::Zero());
287  __ str(ip, FieldMemOperand(the_heap_number(), HeapNumber::kMantissaOffset));
288  __ Ret();
289 }
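
To make the comments above concrete, here is a hypothetical portable C++ rendering of the same encoding (sign bit, biased exponent 30, mantissa formed from the shifted magnitude), checked against the native double bit pattern. The helper name and the sample value are invented, and IEEE-754 doubles are assumed.

#include <cassert>
#include <cstdint>
#include <cstring>

// Invented helper: the two 32-bit words the stub stores, for an int32 that is
// not a Smi (|value| >= 2^30) and is not the special value INT32_MIN.
void EncodeNonSmiInt32(int32_t value, uint32_t* high, uint32_t* low) {
  uint32_t sign = 0;
  uint32_t magnitude = static_cast<uint32_t>(value);
  if (value < 0) {
    sign = 0x80000000u;          // HeapNumber::kSignMask
    magnitude = 0u - magnitude;  // rsb the_int, the_int, #0
  }
  const uint32_t biased_exponent = (1023u + 30u) << 20;  // exponent is 30
  // The implicit leading mantissa 1 (bit 30 of the magnitude) ORs harmlessly
  // into the lowest exponent bit, exactly as the stub's comment explains.
  *high = sign | biased_exponent | (magnitude >> 10);
  *low = magnitude << 22;
}

int main() {
  const int32_t value = -1234567890;  // a non-Smi int32
  uint32_t high, low;
  EncodeNonSmiInt32(value, &high, &low);
  const double as_double = static_cast<double>(value);
  uint64_t bits;
  std::memcpy(&bits, &as_double, sizeof bits);
  assert(high == static_cast<uint32_t>(bits >> 32));
  assert(low == static_cast<uint32_t>(bits));
  return 0;
}
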
290 
291 
292 // Handle the case where the lhs and rhs are the same object.
293 // Equality is almost reflexive (everything but NaN), so this is a test
294 // for "identity and not NaN".
295 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
296  Label* slow,
297  Condition cond) {
298  Label not_identical;
299  Label heap_number, return_equal;
300  __ cmp(r0, r1);
301  __ b(ne, &not_identical);
302 
303  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
304  // so we do the second best thing - test it ourselves.
305  // They are both equal and they are not both Smis so both of them are not
306  // Smis. If it's not a heap number, then return equal.
307  if (cond == lt || cond == gt) {
308  __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
309  __ b(ge, slow);
310  } else {
311  __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
312  __ b(eq, &heap_number);
313  // Comparing JS objects with <=, >= is complicated.
314  if (cond != eq) {
315  __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
316  __ b(ge, slow);
317  // Normally here we fall through to return_equal, but undefined is
318  // special: (undefined == undefined) == true, but
319  // (undefined <= undefined) == false! See ECMAScript 11.8.5.
320  if (cond == le || cond == ge) {
321  __ cmp(r4, Operand(ODDBALL_TYPE));
322  __ b(ne, &return_equal);
323  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
324  __ cmp(r0, r2);
325  __ b(ne, &return_equal);
326  if (cond == le) {
327  // undefined <= undefined should fail.
328  __ mov(r0, Operand(GREATER));
329  } else {
330  // undefined >= undefined should fail.
331  __ mov(r0, Operand(LESS));
332  }
333  __ Ret();
334  }
335  }
336  }
337 
338  __ bind(&return_equal);
339  if (cond == lt) {
340  __ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
341  } else if (cond == gt) {
342  __ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
343  } else {
344  __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
345  }
346  __ Ret();
347 
348  // For less and greater we don't have to check for NaN since the result of
349  // x < x is false regardless. For the others here is some code to check
350  // for NaN.
351  if (cond != lt && cond != gt) {
352  __ bind(&heap_number);
353  // It is a heap number, so return non-equal if it's NaN and equal if it's
354  // not NaN.
355 
356  // The representation of NaN values has all exponent bits (52..62) set,
357  // and not all mantissa bits (0..51) clear.
358  // Read top bits of double representation (second word of value).
359  __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
360  // Test that exponent bits are all set.
361  __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
362  // NaNs have all-one exponents so they sign extend to -1.
363  __ cmp(r3, Operand(-1));
364  __ b(ne, &return_equal);
365 
366  // Shift out flag and all exponent bits, retaining only mantissa.
367  __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
368  // Or with all low-bits of mantissa.
369  __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
370  __ orr(r0, r3, Operand(r2), SetCC);
371  // For equal we already have the right value in r0: Return zero (equal)
372  // if all bits in mantissa are zero (it's an Infinity) and non-zero if
373  // not (it's a NaN). For <= and >= we need to load r0 with the failing
374  // value if it's a NaN.
375  if (cond != eq) {
376  // All-zero means Infinity means equal.
377  __ Ret(eq);
378  if (cond == le) {
379  __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
380  } else {
381  __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
382  }
383  }
384  __ Ret();
385  }
386  // No fall through here.
387 
388  __ bind(&not_identical);
389 }
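
The NaN test above relies on the IEEE-754 layout the comments describe: all exponent bits set and a non-zero mantissa. An equivalent portable predicate, with invented names and offered only as a sketch, is:

#include <cstdint>
#include <cstring>

bool DoubleIsNaN(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  const uint64_t kExponentMask = 0x7FF0000000000000ull;  // bits 52..62
  const uint64_t kMantissaMask = 0x000FFFFFFFFFFFFFull;  // bits 0..51
  // All-one exponent with a zero mantissa is Infinity, not NaN.
  return (bits & kExponentMask) == kExponentMask && (bits & kMantissaMask) != 0;
}
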
390 
391 
392 // See comment at call site.
393 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
394  Register lhs,
395  Register rhs,
396  Label* lhs_not_nan,
397  Label* slow,
398  bool strict) {
399  DCHECK((lhs.is(r0) && rhs.is(r1)) ||
400  (lhs.is(r1) && rhs.is(r0)));
401 
402  Label rhs_is_smi;
403  __ JumpIfSmi(rhs, &rhs_is_smi);
404 
405  // Lhs is a Smi. Check whether the rhs is a heap number.
406  __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
407  if (strict) {
408  // If rhs is not a number and lhs is a Smi then strict equality cannot
409  // succeed. Return non-equal
410  // If rhs is r0 then there is already a non zero value in it.
411  if (!rhs.is(r0)) {
412  __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
413  }
414  __ Ret(ne);
415  } else {
416  // Smi compared non-strictly with a non-Smi non-heap-number. Call
417  // the runtime.
418  __ b(ne, slow);
419  }
420 
421  // Lhs is a smi, rhs is a number.
422  // Convert lhs to a double in d7.
423  __ SmiToDouble(d7, lhs);
424  // Load the double from rhs, tagged HeapNumber r0, to d6.
425  __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
426 
427  // We now have both loaded as doubles but we can skip the lhs nan check
428  // since it's a smi.
429  __ jmp(lhs_not_nan);
430 
431  __ bind(&rhs_is_smi);
432  // Rhs is a smi. Check whether the non-smi lhs is a heap number.
433  __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
434  if (strict) {
435  // If lhs is not a number and rhs is a smi then strict equality cannot
436  // succeed. Return non-equal.
437  // If lhs is r0 then there is already a non zero value in it.
438  if (!lhs.is(r0)) {
439  __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
440  }
441  __ Ret(ne);
442  } else {
443  // Smi compared non-strictly with a non-smi non-heap-number. Call
444  // the runtime.
445  __ b(ne, slow);
446  }
447 
448  // Rhs is a smi, lhs is a heap number.
449  // Load the double from lhs, tagged HeapNumber r1, to d7.
450  __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
451  // Convert rhs to a double in d6.
452  __ SmiToDouble(d6, rhs);
453  // Fall through to both_loaded_as_doubles.
454 }
455 
456 
457 // See comment at call site.
458 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
459  Register lhs,
460  Register rhs) {
461  DCHECK((lhs.is(r0) && rhs.is(r1)) ||
462  (lhs.is(r1) && rhs.is(r0)));
463 
464  // If either operand is a JS object or an oddball value, then they are
465  // not equal since their pointers are different.
466  // There is no test for undetectability in strict equality.
467  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
468  Label first_non_object;
469  // Get the type of the first operand into r2 and compare it with
470  // FIRST_SPEC_OBJECT_TYPE.
471  __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE);
472  __ b(lt, &first_non_object);
473 
474  // Return non-zero (r0 is not zero)
475  Label return_not_equal;
476  __ bind(&return_not_equal);
477  __ Ret();
478 
479  __ bind(&first_non_object);
480  // Check for oddballs: true, false, null, undefined.
481  __ cmp(r2, Operand(ODDBALL_TYPE));
482  __ b(eq, &return_not_equal);
483 
484  __ CompareObjectType(lhs, r3, r3, FIRST_SPEC_OBJECT_TYPE);
485  __ b(ge, &return_not_equal);
486 
487  // Check for oddballs: true, false, null, undefined.
488  __ cmp(r3, Operand(ODDBALL_TYPE));
489  __ b(eq, &return_not_equal);
490 
491  // Now that we have the types we might as well check for
492  // internalized-internalized.
493  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
494  __ orr(r2, r2, Operand(r3));
495  __ tst(r2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
496  __ b(eq, &return_not_equal);
497 }
498 
499 
500 // See comment at call site.
501 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
502  Register lhs,
503  Register rhs,
504  Label* both_loaded_as_doubles,
505  Label* not_heap_numbers,
506  Label* slow) {
507  DCHECK((lhs.is(r0) && rhs.is(r1)) ||
508  (lhs.is(r1) && rhs.is(r0)));
509 
510  __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
511  __ b(ne, not_heap_numbers);
512  __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
513  __ cmp(r2, r3);
514  __ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
515 
516  // Both are heap numbers. Load them up then jump to the code we have
517  // for that.
518  __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
519  __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
520  __ jmp(both_loaded_as_doubles);
521 }
522 
523 
524 // Fast negative check for internalized-to-internalized equality.
525 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
526  Register lhs,
527  Register rhs,
528  Label* possible_strings,
529  Label* not_both_strings) {
530  DCHECK((lhs.is(r0) && rhs.is(r1)) ||
531  (lhs.is(r1) && rhs.is(r0)));
532 
533  // r2 is object type of rhs.
534  Label object_test;
535  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
536  __ tst(r2, Operand(kIsNotStringMask));
537  __ b(ne, &object_test);
538  __ tst(r2, Operand(kIsNotInternalizedMask));
539  __ b(ne, possible_strings);
540  __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
541  __ b(ge, not_both_strings);
542  __ tst(r3, Operand(kIsNotInternalizedMask));
543  __ b(ne, possible_strings);
544 
545  // Both are internalized. We already checked they weren't the same pointer
546  // so they are not equal.
547  __ mov(r0, Operand(NOT_EQUAL));
548  __ Ret();
549 
550  __ bind(&object_test);
551  __ cmp(r2, Operand(FIRST_SPEC_OBJECT_TYPE));
552  __ b(lt, not_both_strings);
553  __ CompareObjectType(lhs, r2, r3, FIRST_SPEC_OBJECT_TYPE);
554  __ b(lt, not_both_strings);
555  // If both objects are undetectable, they are equal. Otherwise, they
556  // are not equal, since they are different objects and an object is not
557  // equal to undefined.
558  __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
559  __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
560  __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
561  __ and_(r0, r2, Operand(r3));
562  __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
563  __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
564  __ Ret();
565 }
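
The closing and_/eor sequence above turns the two maps' bit fields into the comparison result without a branch; a small illustrative C++ equivalent (names invented) looks like this:

#include <cstdint>

// kUndetectableBit stands in for (1 << Map::kIsUndetectable).
int32_t UndetectableObjectsCompareResult(uint8_t lhs_bit_field,
                                         uint8_t rhs_bit_field,
                                         uint8_t kUndetectableBit) {
  int32_t r = lhs_bit_field & rhs_bit_field;  // and_: bit survives only if set in both
  r &= kUndetectableBit;                      // keep just the undetectable flag
  r ^= kUndetectableBit;                      // eor: 0 (EQUAL) iff both were undetectable
  return r;                                   // non-zero means "not equal"
}
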
566 
567 
568 static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
569  Register scratch,
570  CompareICState::State expected,
571  Label* fail) {
572  Label ok;
573  if (expected == CompareICState::SMI) {
574  __ JumpIfNotSmi(input, fail);
575  } else if (expected == CompareICState::NUMBER) {
576  __ JumpIfSmi(input, &ok);
577  __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
578  DONT_DO_SMI_CHECK);
579  }
580  // We could be strict about internalized/non-internalized here, but as long as
581  // hydrogen doesn't care, the stub doesn't have to care either.
582  __ bind(&ok);
583 }
584 
585 
586 // On entry r1 and r2 are the values to be compared.
587 // On exit r0 is 0, positive or negative to indicate the result of
588 // the comparison.
589 void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
590  Register lhs = r1;
591  Register rhs = r0;
592  Condition cc = GetCondition();
593 
594  Label miss;
595  CompareICStub_CheckInputType(masm, lhs, r2, left(), &miss);
596  CompareICStub_CheckInputType(masm, rhs, r3, right(), &miss);
597 
598  Label slow; // Call builtin.
599  Label not_smis, both_loaded_as_doubles, lhs_not_nan;
600 
601  Label not_two_smis, smi_done;
602  __ orr(r2, r1, r0);
603  __ JumpIfNotSmi(r2, &not_two_smis);
604  __ mov(r1, Operand(r1, ASR, 1));
605  __ sub(r0, r1, Operand(r0, ASR, 1));
606  __ Ret();
607  __ bind(&not_two_smis);
608 
609  // NOTICE! This code is only reached after a smi-fast-case check, so
610  // it is certain that at least one operand isn't a smi.
611 
612  // Handle the case where the objects are identical. Either returns the answer
613  // or goes to slow. Only falls through if the objects were not identical.
614  EmitIdenticalObjectComparison(masm, &slow, cc);
615 
616  // If either is a Smi (we know that not both are), then they can only
617  // be strictly equal if the other is a HeapNumber.
618  STATIC_ASSERT(kSmiTag == 0);
619  DCHECK_EQ(0, Smi::FromInt(0));
620  __ and_(r2, lhs, Operand(rhs));
621  __ JumpIfNotSmi(r2, &not_smis);
622  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
623  // 1) Return the answer.
624  // 2) Go to slow.
625  // 3) Fall through to both_loaded_as_doubles.
626  // 4) Jump to lhs_not_nan.
627  // In cases 3 and 4 we have found out we were dealing with a number-number
628  // comparison. If VFP3 is supported the double values of the numbers have
629  // been loaded into d7 and d6. Otherwise, the double values have been loaded
630  // into r0, r1, r2, and r3.
631  EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());
632 
633  __ bind(&both_loaded_as_doubles);
634  // The arguments have been converted to doubles and stored in d6 and d7, if
635  // VFP3 is supported, or in r0, r1, r2, and r3.
636  __ bind(&lhs_not_nan);
637  Label no_nan;
638  // ARMv7 VFP3 instructions to implement double precision comparison.
639  __ VFPCompareAndSetFlags(d7, d6);
640  Label nan;
641  __ b(vs, &nan);
642  __ mov(r0, Operand(EQUAL), LeaveCC, eq);
643  __ mov(r0, Operand(LESS), LeaveCC, lt);
644  __ mov(r0, Operand(GREATER), LeaveCC, gt);
645  __ Ret();
646 
647  __ bind(&nan);
648  // If one of the sides was a NaN then the v flag is set. Load r0 with
649  // whatever it takes to make the comparison fail, since comparisons with NaN
650  // always fail.
651  if (cc == lt || cc == le) {
652  __ mov(r0, Operand(GREATER));
653  } else {
654  __ mov(r0, Operand(LESS));
655  }
656  __ Ret();
657 
658  __ bind(&not_smis);
659  // At this point we know we are dealing with two different objects,
660  // and neither of them is a Smi. The objects are in rhs_ and lhs_.
661  if (strict()) {
662  // This returns non-equal for some object types, or falls through if it
663  // was not lucky.
664  EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
665  }
666 
667  Label check_for_internalized_strings;
668  Label flat_string_check;
669  // Check for heap-number-heap-number comparison. Can jump to slow case,
670  // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
671  // that case. If the inputs are not doubles then jumps to
672  // check_for_internalized_strings.
673  // In this case r2 will contain the type of rhs_. Never falls through.
674  EmitCheckForTwoHeapNumbers(masm,
675  lhs,
676  rhs,
677  &both_loaded_as_doubles,
678  &check_for_internalized_strings,
679  &flat_string_check);
680 
681  __ bind(&check_for_internalized_strings);
682  // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
683  // internalized strings.
684  if (cc == eq && !strict()) {
685  // Returns an answer for two internalized strings or two detectable objects.
686  // Otherwise jumps to string case or not both strings case.
687  // Assumes that r2 is the type of rhs_ on entry.
688  EmitCheckForInternalizedStringsOrObjects(
689  masm, lhs, rhs, &flat_string_check, &slow);
690  }
691 
692  // Check for both being sequential one-byte strings,
693  // and inline if that is the case.
694  __ bind(&flat_string_check);
695 
696  __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, r2, r3, &slow);
697 
698  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r2,
699  r3);
700  if (cc == eq) {
701  StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, r2, r3, r4);
702  } else {
703  StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, r2, r3, r4,
704  r5);
705  }
706  // Never falls through to here.
707 
708  __ bind(&slow);
709 
710  __ Push(lhs, rhs);
711  // Figure out which native to call and setup the arguments.
712  Builtins::JavaScript native;
713  if (cc == eq) {
714  native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
715  } else {
716  native = Builtins::COMPARE;
717  int ncr; // NaN compare result
718  if (cc == lt || cc == le) {
719  ncr = GREATER;
720  } else {
721  DCHECK(cc == gt || cc == ge); // remaining cases
722  ncr = LESS;
723  }
724  __ mov(r0, Operand(Smi::FromInt(ncr)));
725  __ push(r0);
726  }
727 
728  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
729  // tagged as a small integer.
730  __ InvokeBuiltin(native, JUMP_FUNCTION);
731 
732  __ bind(&miss);
733  GenerateMiss(masm);
734 }
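
The two-smi fast path near the top of this function works because smis carry a 0 tag bit in the least significant position, so an arithmetic shift right by one untags them. A hypothetical portable sketch of that path, assuming arithmetic right shift as the ASR instruction performs:

#include <cstdint>

int32_t CompareTwoSmis(int32_t lhs_tagged, int32_t rhs_tagged) {
  int32_t lhs_untagged = lhs_tagged >> 1;  // mov r1, Operand(r1, ASR, 1)
  int32_t rhs_untagged = rhs_tagged >> 1;
  // sub r0, r1, Operand(r0, ASR, 1): negative, zero or positive,
  // which is exactly the LESS / EQUAL / GREATER result the caller expects.
  return lhs_untagged - rhs_untagged;
}
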
735 
736 
737 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
738  // We don't allow a GC during a store buffer overflow so there is no need to
739  // store the registers in any particular way, but we do have to store and
740  // restore them.
741  __ stm(db_w, sp, kCallerSaved | lr.bit());
742 
743  const Register scratch = r1;
744 
745  if (save_doubles()) {
746  __ SaveFPRegs(sp, scratch);
747  }
748  const int argument_count = 1;
749  const int fp_argument_count = 0;
750 
751  AllowExternalCallThatCantCauseGC scope(masm);
752  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
753  __ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
754  __ CallCFunction(
755  ExternalReference::store_buffer_overflow_function(isolate()),
756  argument_count);
757  if (save_doubles()) {
758  __ RestoreFPRegs(sp, scratch);
759  }
760  __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
761 }
762 
763 
764 void MathPowStub::Generate(MacroAssembler* masm) {
765  const Register base = r1;
766  const Register exponent = MathPowTaggedDescriptor::exponent();
767  DCHECK(exponent.is(r2));
768  const Register heapnumbermap = r5;
769  const Register heapnumber = r0;
770  const DwVfpRegister double_base = d0;
771  const DwVfpRegister double_exponent = d1;
772  const DwVfpRegister double_result = d2;
773  const DwVfpRegister double_scratch = d3;
774  const SwVfpRegister single_scratch = s6;
775  const Register scratch = r9;
776  const Register scratch2 = r4;
777 
778  Label call_runtime, done, int_exponent;
779  if (exponent_type() == ON_STACK) {
780  Label base_is_smi, unpack_exponent;
781  // The exponent and base are supplied as arguments on the stack.
782  // This can only happen if the stub is called from non-optimized code.
783  // Load input parameters from stack to double registers.
784  __ ldr(base, MemOperand(sp, 1 * kPointerSize));
785  __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));
786 
787  __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
788 
789  __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
790  __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
791  __ cmp(scratch, heapnumbermap);
792  __ b(ne, &call_runtime);
793 
794  __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
795  __ jmp(&unpack_exponent);
796 
797  __ bind(&base_is_smi);
798  __ vmov(single_scratch, scratch);
799  __ vcvt_f64_s32(double_base, single_scratch);
800  __ bind(&unpack_exponent);
801 
802  __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
803 
804  __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
805  __ cmp(scratch, heapnumbermap);
806  __ b(ne, &call_runtime);
807  __ vldr(double_exponent,
808  FieldMemOperand(exponent, HeapNumber::kValueOffset));
809  } else if (exponent_type() == TAGGED) {
810  // Base is already in double_base.
811  __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
812 
813  __ vldr(double_exponent,
814  FieldMemOperand(exponent, HeapNumber::kValueOffset));
815  }
816 
817  if (exponent_type() != INTEGER) {
818  Label int_exponent_convert;
819  // Detect integer exponents stored as double.
820  __ vcvt_u32_f64(single_scratch, double_exponent);
821  // We do not check for NaN or Infinity here because comparing numbers on
822  // ARM correctly distinguishes NaNs. We end up calling the built-in.
823  __ vcvt_f64_u32(double_scratch, single_scratch);
824  __ VFPCompareAndSetFlags(double_scratch, double_exponent);
825  __ b(eq, &int_exponent_convert);
826 
827  if (exponent_type() == ON_STACK) {
828  // Detect square root case. Crankshaft detects constant +/-0.5 at
829  // compile time and uses DoMathPowHalf instead. We then skip this check
830  // for non-constant cases of +/-0.5 as these hardly occur.
831  Label not_plus_half;
832 
833  // Test for 0.5.
834  __ vmov(double_scratch, 0.5, scratch);
835  __ VFPCompareAndSetFlags(double_exponent, double_scratch);
836  __ b(ne, &not_plus_half);
837 
838  // Calculates square root of base. Check for the special case of
839  // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
840  __ vmov(double_scratch, -V8_INFINITY, scratch);
841  __ VFPCompareAndSetFlags(double_base, double_scratch);
842  __ vneg(double_result, double_scratch, eq);
843  __ b(eq, &done);
844 
845  // Add +0 to convert -0 to +0.
846  __ vadd(double_scratch, double_base, kDoubleRegZero);
847  __ vsqrt(double_result, double_scratch);
848  __ jmp(&done);
849 
850  __ bind(&not_plus_half);
851  __ vmov(double_scratch, -0.5, scratch);
852  __ VFPCompareAndSetFlags(double_exponent, double_scratch);
853  __ b(ne, &call_runtime);
854 
855  // Calculates square root of base. Check for the special case of
856  // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
857  __ vmov(double_scratch, -V8_INFINITY, scratch);
858  __ VFPCompareAndSetFlags(double_base, double_scratch);
859  __ vmov(double_result, kDoubleRegZero, eq);
860  __ b(eq, &done);
861 
862  // Add +0 to convert -0 to +0.
863  __ vadd(double_scratch, double_base, kDoubleRegZero);
864  __ vmov(double_result, 1.0, scratch);
865  __ vsqrt(double_scratch, double_scratch);
866  __ vdiv(double_result, double_result, double_scratch);
867  __ jmp(&done);
868  }
869 
870  __ push(lr);
871  {
872  AllowExternalCallThatCantCauseGC scope(masm);
873  __ PrepareCallCFunction(0, 2, scratch);
874  __ MovToFloatParameters(double_base, double_exponent);
875  __ CallCFunction(
876  ExternalReference::power_double_double_function(isolate()),
877  0, 2);
878  }
879  __ pop(lr);
880  __ MovFromFloatResult(double_result);
881  __ jmp(&done);
882 
883  __ bind(&int_exponent_convert);
884  __ vcvt_u32_f64(single_scratch, double_exponent);
885  __ vmov(scratch, single_scratch);
886  }
887 
888  // Calculate power with integer exponent.
889  __ bind(&int_exponent);
890 
891  // Get two copies of exponent in the registers scratch and exponent.
892  if (exponent_type() == INTEGER) {
893  __ mov(scratch, exponent);
894  } else {
895  // Exponent has previously been stored into scratch as untagged integer.
896  __ mov(exponent, scratch);
897  }
898  __ vmov(double_scratch, double_base); // Back up base.
899  __ vmov(double_result, 1.0, scratch2);
900 
901  // Get absolute value of exponent.
902  __ cmp(scratch, Operand::Zero());
903  __ mov(scratch2, Operand::Zero(), LeaveCC, mi);
904  __ sub(scratch, scratch2, scratch, LeaveCC, mi);
905 
906  Label while_true;
907  __ bind(&while_true);
908  __ mov(scratch, Operand(scratch, ASR, 1), SetCC);
909  __ vmul(double_result, double_result, double_scratch, cs);
910  __ vmul(double_scratch, double_scratch, double_scratch, ne);
911  __ b(ne, &while_true);
912 
913  __ cmp(exponent, Operand::Zero());
914  __ b(ge, &done);
915  __ vmov(double_scratch, 1.0, scratch);
916  __ vdiv(double_result, double_scratch, double_result);
917  // Test whether result is zero. Bail out to check for subnormal result.
918  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
919  __ VFPCompareAndSetFlags(double_result, 0.0);
920  __ b(ne, &done);
921  // double_exponent may not contain the exponent value if the input was a
922  // smi. We set it with exponent value before bailing out.
923  __ vmov(single_scratch, exponent);
924  __ vcvt_f64_s32(double_exponent, single_scratch);
925 
926  // Returning or bailing out.
927  Counters* counters = isolate()->counters();
928  if (exponent_type() == ON_STACK) {
929  // The arguments are still on the stack.
930  __ bind(&call_runtime);
931  __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
932 
933  // The stub is called from non-optimized code, which expects the result
934  // as heap number in exponent.
935  __ bind(&done);
936  __ AllocateHeapNumber(
937  heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
938  __ vstr(double_result,
939  FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
940  DCHECK(heapnumber.is(r0));
941  __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
942  __ Ret(2);
943  } else {
944  __ push(lr);
945  {
946  AllowExternalCallThatCantCauseGC scope(masm);
947  __ PrepareCallCFunction(0, 2, scratch);
948  __ MovToFloatParameters(double_base, double_exponent);
949  __ CallCFunction(
950  ExternalReference::power_double_double_function(isolate()),
951  0, 2);
952  }
953  __ pop(lr);
954  __ MovFromFloatResult(double_result);
955 
956  __ bind(&done);
957  __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
958  __ Ret();
959  }
960 }
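
The integer-exponent loop above is square-and-multiply over the bits of |exponent|; a hypothetical portable sketch (ignoring the stub's counters, heap-number boxing, and the subnormal bail-out to the runtime) is:

// Invented helper mirroring the while_true loop for illustration.
double PowIntegerExponent(double base, int exponent) {
  unsigned bits = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                               : static_cast<unsigned>(exponent);
  double result = 1.0;
  double running_square = base;               // double_scratch in the stub
  while (bits != 0) {
    if (bits & 1u) result *= running_square;  // vmul ... cs (carry = bit shifted out)
    running_square *= running_square;         // vmul ... ne (more bits remain)
    bits >>= 1;                               // mov scratch, Operand(scratch, ASR, 1), SetCC
  }
  if (exponent < 0) result = 1.0 / result;    // vdiv; the stub re-checks for a zero
                                              // (possibly subnormal) result and bails out
  return result;
}
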
961 
962 
963 bool CEntryStub::NeedsImmovableCode() {
964  return true;
965 }
966 
967 
968 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
969  CEntryStub::GenerateAheadOfTime(isolate);
970  WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
971  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
972  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
973  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
974  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
975  BinaryOpICStub::GenerateAheadOfTime(isolate);
976  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
977 }
978 
979 
980 void CodeStub::GenerateFPStubs(Isolate* isolate) {
981  // Generate if not already in cache.
982  SaveFPRegsMode mode = kSaveFPRegs;
983  CEntryStub(isolate, 1, mode).GetCode();
984  StoreBufferOverflowStub(isolate, mode).GetCode();
985  isolate->set_fp_stubs_generated(true);
986 }
987 
988 
989 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
990  CEntryStub stub(isolate, 1, kDontSaveFPRegs);
991  stub.GetCode();
992 }
993 
994 
995 void CEntryStub::Generate(MacroAssembler* masm) {
996  // Called from JavaScript; parameters are on stack as if calling JS function.
997  // r0: number of arguments including receiver
998  // r1: pointer to builtin function
999  // fp: frame pointer (restored after C call)
1000  // sp: stack pointer (restored as callee's sp after C call)
1001  // cp: current context (C callee-saved)
1002 
1003  ProfileEntryHookStub::MaybeCallEntryHook(masm);
1004 
1005  __ mov(r5, Operand(r1));
1006 
1007  // Compute the argv pointer in a callee-saved register.
1008  __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2));
1009  __ sub(r1, r1, Operand(kPointerSize));
1010 
1011  // Enter the exit frame that transitions from JavaScript to C++.
1012  FrameScope scope(masm, StackFrame::MANUAL);
1013  __ EnterExitFrame(save_doubles());
1014 
1015  // Store a copy of argc in callee-saved registers for later.
1016  __ mov(r4, Operand(r0));
1017 
1018  // r0, r4: number of arguments including receiver (C callee-saved)
1019  // r1: pointer to the first argument (C callee-saved)
1020  // r5: pointer to builtin function (C callee-saved)
1021 
1022  // Result returned in r0 or r0+r1 by default.
1023 
1024 #if V8_HOST_ARCH_ARM
1025  int frame_alignment = MacroAssembler::ActivationFrameAlignment();
1026  int frame_alignment_mask = frame_alignment - 1;
1027  if (FLAG_debug_code) {
1028  if (frame_alignment > kPointerSize) {
1029  Label alignment_as_expected;
1030  DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
1031  __ tst(sp, Operand(frame_alignment_mask));
1032  __ b(eq, &alignment_as_expected);
1033  // Don't use Check here, as it will call Runtime_Abort re-entering here.
1034  __ stop("Unexpected alignment");
1035  __ bind(&alignment_as_expected);
1036  }
1037  }
1038 #endif
1039 
1040  // Call C built-in.
1041  // r0 = argc, r1 = argv
1042  __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
1043 
1044  // To let the GC traverse the return address of the exit frames, we need to
1045  // know where the return address is. The CEntryStub is unmovable, so
1046  // we can store the address on the stack to be able to find it again and
1047  // we never have to restore it, because it will not change.
1048  // Compute the return address in lr to return to after the jump below. Pc is
1049  // already at '+ 8' from the current instruction but return is after three
1050  // instructions so add another 4 to pc to get the return address.
1051  {
1052  // Prevent literal pool emission before return address.
1053  Assembler::BlockConstPoolScope block_const_pool(masm);
1054  __ add(lr, pc, Operand(4));
1055  __ str(lr, MemOperand(sp, 0));
1056  __ Call(r5);
1057  }
1058 
1059  __ VFPEnsureFPSCRState(r2);
1060 
1061  // Runtime functions should not return 'the hole'. Allowing it to escape may
1062  // lead to crashes in the IC code later.
1063  if (FLAG_debug_code) {
1064  Label okay;
1065  __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
1066  __ b(ne, &okay);
1067  __ stop("The hole escaped");
1068  __ bind(&okay);
1069  }
1070 
1071  // Check result for exception sentinel.
1072  Label exception_returned;
1073  __ CompareRoot(r0, Heap::kExceptionRootIndex);
1074  __ b(eq, &exception_returned);
1075 
1076  ExternalReference pending_exception_address(
1077  Isolate::kPendingExceptionAddress, isolate());
1078 
1079  // Check that there is no pending exception, otherwise we
1080  // should have returned the exception sentinel.
1081  if (FLAG_debug_code) {
1082  Label okay;
1083  __ mov(r2, Operand(pending_exception_address));
1084  __ ldr(r2, MemOperand(r2));
1085  __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
1086  // Cannot use check here as it attempts to generate call into runtime.
1087  __ b(eq, &okay);
1088  __ stop("Unexpected pending exception");
1089  __ bind(&okay);
1090  }
1091 
1092  // Exit C frame and return.
1093  // r0:r1: result
1094  // sp: stack pointer
1095  // fp: frame pointer
1096  // Callee-saved register r4 still holds argc.
1097  __ LeaveExitFrame(save_doubles(), r4, true);
1098  __ mov(pc, lr);
1099 
1100  // Handling of exception.
1101  __ bind(&exception_returned);
1102 
1103  // Retrieve the pending exception.
1104  __ mov(r2, Operand(pending_exception_address));
1105  __ ldr(r0, MemOperand(r2));
1106 
1107  // Clear the pending exception.
1108  __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
1109  __ str(r3, MemOperand(r2));
1110 
1111  // Special handling of termination exceptions which are uncatchable
1112  // by javascript code.
1113  Label throw_termination_exception;
1114  __ CompareRoot(r0, Heap::kTerminationExceptionRootIndex);
1115  __ b(eq, &throw_termination_exception);
1116 
1117  // Handle normal exception.
1118  __ Throw(r0);
1119 
1120  __ bind(&throw_termination_exception);
1121  __ ThrowUncatchable(r0);
1122 }
1123 
1124 
1125 void JSEntryStub::Generate(MacroAssembler* masm) {
1126  // r0: code entry
1127  // r1: function
1128  // r2: receiver
1129  // r3: argc
1130  // [sp+0]: argv
1131 
1132  Label invoke, handler_entry, exit;
1133 
1134  ProfileEntryHookStub::MaybeCallEntryHook(masm);
1135 
1136  // Called from C, so do not pop argc and args on exit (preserve sp)
1137  // No need to save register-passed args
1138  // Save callee-saved registers (incl. cp and fp), sp, and lr
1139  __ stm(db_w, sp, kCalleeSaved | lr.bit());
1140 
1141  // Save callee-saved vfp registers.
1142  __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
1143  // Set up the reserved register for 0.0.
1144  __ vmov(kDoubleRegZero, 0.0);
1145  __ VFPEnsureFPSCRState(r4);
1146 
1147  // Get address of argv, see stm above.
1148  // r0: code entry
1149  // r1: function
1150  // r2: receiver
1151  // r3: argc
1152 
1153  // Set up argv in r4.
1154  int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
1155  offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
1156  __ ldr(r4, MemOperand(sp, offset_to_argv));
1157 
1158  // Push a frame with special values setup to mark it as an entry frame.
1159  // r0: code entry
1160  // r1: function
1161  // r2: receiver
1162  // r3: argc
1163  // r4: argv
1164  int marker = type();
1165  if (FLAG_enable_ool_constant_pool) {
1166  __ mov(r8, Operand(isolate()->factory()->empty_constant_pool_array()));
1167  }
1168  __ mov(r7, Operand(Smi::FromInt(marker)));
1169  __ mov(r6, Operand(Smi::FromInt(marker)));
1170  __ mov(r5,
1171  Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1172  __ ldr(r5, MemOperand(r5));
1173  __ mov(ip, Operand(-1)); // Push a bad frame pointer to fail if it is used.
1174  __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() |
1175  (FLAG_enable_ool_constant_pool ? r8.bit() : 0) |
1176  ip.bit());
1177 
1178  // Set up frame pointer for the frame to be pushed.
1179  __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
1180 
1181  // If this is the outermost JS call, set js_entry_sp value.
1182  Label non_outermost_js;
1183  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
1184  __ mov(r5, Operand(ExternalReference(js_entry_sp)));
1185  __ ldr(r6, MemOperand(r5));
1186  __ cmp(r6, Operand::Zero());
1187  __ b(ne, &non_outermost_js);
1188  __ str(fp, MemOperand(r5));
1189  __ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1190  Label cont;
1191  __ b(&cont);
1192  __ bind(&non_outermost_js);
1193  __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
1194  __ bind(&cont);
1195  __ push(ip);
1196 
1197  // Jump to a faked try block that does the invoke, with a faked catch
1198  // block that sets the pending exception.
1199  __ jmp(&invoke);
1200 
1201  // Block literal pool emission whilst taking the position of the handler
1202  // entry. This avoids making the assumption that literal pools are always
1203  // emitted after an instruction is emitted, rather than before.
1204  {
1205  Assembler::BlockConstPoolScope block_const_pool(masm);
1206  __ bind(&handler_entry);
1207  handler_offset_ = handler_entry.pos();
1208  // Caught exception: Store result (exception) in the pending exception
1209  // field in the JSEnv and return a failure sentinel. Coming in here the
1210  // fp will be invalid because the PushTryHandler below sets it to 0 to
1211  // signal the existence of the JSEntry frame.
1212  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1213  isolate())));
1214  }
1215  __ str(r0, MemOperand(ip));
1216  __ LoadRoot(r0, Heap::kExceptionRootIndex);
1217  __ b(&exit);
1218 
1219  // Invoke: Link this frame into the handler chain. There's only one
1220  // handler block in this code object, so its index is 0.
1221  __ bind(&invoke);
1222  // Must preserve r0-r4, r5-r6 are available.
1223  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
1224  // If an exception not caught by another handler occurs, this handler
1225  // returns control to the code after the bl(&invoke) above, which
1226  // restores all kCalleeSaved registers (including cp and fp) to their
1227  // saved values before returning a failure to C.
1228 
1229  // Clear any pending exceptions.
1230  __ mov(r5, Operand(isolate()->factory()->the_hole_value()));
1231  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1232  isolate())));
1233  __ str(r5, MemOperand(ip));
1234 
1235  // Invoke the function by calling through JS entry trampoline builtin.
1236  // Notice that we cannot store a reference to the trampoline code directly in
1237  // this stub, because runtime stubs are not traversed when doing GC.
1238 
1239  // Expected registers by Builtins::JSEntryTrampoline
1240  // r0: code entry
1241  // r1: function
1242  // r2: receiver
1243  // r3: argc
1244  // r4: argv
1245  if (type() == StackFrame::ENTRY_CONSTRUCT) {
1246  ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
1247  isolate());
1248  __ mov(ip, Operand(construct_entry));
1249  } else {
1250  ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
1251  __ mov(ip, Operand(entry));
1252  }
1253  __ ldr(ip, MemOperand(ip)); // deref address
1254  __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
1255 
1256  // Branch and link to JSEntryTrampoline.
1257  __ Call(ip);
1258 
1259  // Unlink this frame from the handler chain.
1260  __ PopTryHandler();
1261 
1262  __ bind(&exit); // r0 holds result
1263  // Check if the current stack frame is marked as the outermost JS frame.
1264  Label non_outermost_js_2;
1265  __ pop(r5);
1266  __ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1267  __ b(ne, &non_outermost_js_2);
1268  __ mov(r6, Operand::Zero());
1269  __ mov(r5, Operand(ExternalReference(js_entry_sp)));
1270  __ str(r6, MemOperand(r5));
1271  __ bind(&non_outermost_js_2);
1272 
1273  // Restore the top frame descriptors from the stack.
1274  __ pop(r3);
1275  __ mov(ip,
1276  Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1277  __ str(r3, MemOperand(ip));
1278 
1279  // Reset the stack to the callee saved registers.
1280  __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
1281 
1282  // Restore callee-saved registers and return.
1283 #ifdef DEBUG
1284  if (FLAG_debug_code) {
1285  __ mov(lr, Operand(pc));
1286  }
1287 #endif
1288 
1289  // Restore callee-saved vfp registers.
1290  __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
1291 
1292  __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
1293 }
1294 
1295 
1296 // Uses registers r0 to r4.
1297 // Expected input (depending on whether args are in registers or on the stack):
1298 // * object: r0 or at sp + 1 * kPointerSize.
1299 // * function: r1 or at sp.
1300 //
1301 // An inlined call site may have been generated before calling this stub.
1302 // In this case the offset to the inline sites to patch are passed in r5 and r6.
1303 // (See LCodeGen::DoInstanceOfKnownGlobal)
1304 void InstanceofStub::Generate(MacroAssembler* masm) {
1305  // Call site inlining and patching implies arguments in registers.
1306  DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
1307 
1308  // Fixed register usage throughout the stub:
1309  const Register object = r0; // Object (lhs).
1310  Register map = r3; // Map of the object.
1311  const Register function = r1; // Function (rhs).
1312  const Register prototype = r4; // Prototype of the function.
1313  const Register scratch = r2;
1314 
1315  Label slow, loop, is_instance, is_not_instance, not_js_object;
1316 
1317  if (!HasArgsInRegisters()) {
1318  __ ldr(object, MemOperand(sp, 1 * kPointerSize));
1319  __ ldr(function, MemOperand(sp, 0));
1320  }
1321 
1322  // Check that the left hand is a JS object and load map.
1323  __ JumpIfSmi(object, &not_js_object);
1324  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
1325 
1326  // If there is a call site cache don't look in the global cache, but do the
1327  // real lookup and update the call site cache.
1328  if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
1329  Label miss;
1330  __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1331  __ b(ne, &miss);
1332  __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
1333  __ b(ne, &miss);
1334  __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
1335  __ Ret(HasArgsInRegisters() ? 0 : 2);
1336 
1337  __ bind(&miss);
1338  }
1339 
1340  // Get the prototype of the function.
1341  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
1342 
1343  // Check that the function prototype is a JS object.
1344  __ JumpIfSmi(prototype, &slow);
1345  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
1346 
1347  // Update the global instanceof or call site inlined cache with the current
1348  // map and function. The cached answer will be set when it is known below.
1349  if (!HasCallSiteInlineCheck()) {
1350  __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1351  __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
1352  } else {
1353  DCHECK(HasArgsInRegisters());
1354  // Patch the (relocated) inlined map check.
1355 
1356  // The map_load_offset was stored in r5
1357  // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
1358  const Register map_load_offset = r5;
1359  __ sub(r9, lr, map_load_offset);
1360  // Get the map location in r5 and patch it.
1361  __ GetRelocatedValueLocation(r9, map_load_offset, scratch);
1362  __ ldr(map_load_offset, MemOperand(map_load_offset));
1363  __ str(map, FieldMemOperand(map_load_offset, Cell::kValueOffset));
1364  }
1365 
1366  // Register mapping: r3 is object map and r4 is function prototype.
1367  // Get prototype of object into r2.
1368  __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
1369 
1370  // We don't need map any more. Use it as a scratch register.
1371  Register scratch2 = map;
1372  map = no_reg;
1373 
1374  // Loop through the prototype chain looking for the function prototype.
1375  __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
1376  __ bind(&loop);
1377  __ cmp(scratch, Operand(prototype));
1378  __ b(eq, &is_instance);
1379  __ cmp(scratch, scratch2);
1380  __ b(eq, &is_not_instance);
1381  __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
1382  __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
1383  __ jmp(&loop);
1384  Factory* factory = isolate()->factory();
1385 
1386  __ bind(&is_instance);
1387  if (!HasCallSiteInlineCheck()) {
1388  __ mov(r0, Operand(Smi::FromInt(0)));
1389  __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
1390  if (ReturnTrueFalseObject()) {
1391  __ Move(r0, factory->true_value());
1392  }
1393  } else {
1394  // Patch the call site to return true.
1395  __ LoadRoot(r0, Heap::kTrueValueRootIndex);
1396  // The bool_load_offset was stored in r6
1397  // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
1398  const Register bool_load_offset = r6;
1399  __ sub(r9, lr, bool_load_offset);
1400  // Get the boolean result location in scratch and patch it.
1401  __ GetRelocatedValueLocation(r9, scratch, scratch2);
1402  __ str(r0, MemOperand(scratch));
1403 
1404  if (!ReturnTrueFalseObject()) {
1405  __ mov(r0, Operand(Smi::FromInt(0)));
1406  }
1407  }
1408  __ Ret(HasArgsInRegisters() ? 0 : 2);
1409 
1410  __ bind(&is_not_instance);
1411  if (!HasCallSiteInlineCheck()) {
1412  __ mov(r0, Operand(Smi::FromInt(1)));
1413  __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
1414  if (ReturnTrueFalseObject()) {
1415  __ Move(r0, factory->false_value());
1416  }
1417  } else {
1418  // Patch the call site to return false.
1419  __ LoadRoot(r0, Heap::kFalseValueRootIndex);
1420  // The bool_load_offset was stored in r6
1421  // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
1422  const Register bool_load_offset = r6;
1423  __ sub(r9, lr, bool_load_offset);
1424  ;
1425  // Get the boolean result location in scratch and patch it.
1426  __ GetRelocatedValueLocation(r9, scratch, scratch2);
1427  __ str(r0, MemOperand(scratch));
1428 
1429  if (!ReturnTrueFalseObject()) {
1430  __ mov(r0, Operand(Smi::FromInt(1)));
1431  }
1432  }
1433  __ Ret(HasArgsInRegisters() ? 0 : 2);
1434 
1435  Label object_not_null, object_not_null_or_smi;
1436  __ bind(&not_js_object);
1437  // Before null, smi and string value checks, check that the rhs is a function
1438  // as for a non-function rhs an exception needs to be thrown.
1439  __ JumpIfSmi(function, &slow);
1440  __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE);
1441  __ b(ne, &slow);
1442 
1443  // Null is not instance of anything.
1444  __ cmp(scratch, Operand(isolate()->factory()->null_value()));
1445  __ b(ne, &object_not_null);
1446  if (ReturnTrueFalseObject()) {
1447  __ Move(r0, factory->false_value());
1448  } else {
1449  __ mov(r0, Operand(Smi::FromInt(1)));
1450  }
1451  __ Ret(HasArgsInRegisters() ? 0 : 2);
1452 
1453  __ bind(&object_not_null);
1454  // Smi values are not instances of anything.
1455  __ JumpIfNotSmi(object, &object_not_null_or_smi);
1456  if (ReturnTrueFalseObject()) {
1457  __ Move(r0, factory->false_value());
1458  } else {
1459  __ mov(r0, Operand(Smi::FromInt(1)));
1460  }
1461  __ Ret(HasArgsInRegisters() ? 0 : 2);
1462 
1463  __ bind(&object_not_null_or_smi);
1464  // String values are not instances of anything.
1465  __ IsObjectJSStringType(object, scratch, &slow);
1466  if (ReturnTrueFalseObject()) {
1467  __ Move(r0, factory->false_value());
1468  } else {
1469  __ mov(r0, Operand(Smi::FromInt(1)));
1470  }
1471  __ Ret(HasArgsInRegisters() ? 0 : 2);
1472 
1473  // Slow-case. Tail call builtin.
1474  __ bind(&slow);
1475  if (!ReturnTrueFalseObject()) {
1476  if (HasArgsInRegisters()) {
1477  __ Push(r0, r1);
1478  }
1479  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
1480  } else {
1481  {
1482  FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
1483  __ Push(r0, r1);
1484  __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
1485  }
1486  __ cmp(r0, Operand::Zero());
1487  __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
1488  __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
1489  __ Ret(HasArgsInRegisters() ? 0 : 2);
1490  }
1491 }
1492 
1493 
1494 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
1495  Label miss;
1496  Register receiver = LoadDescriptor::ReceiverRegister();
1497 
1498  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r3,
1499  r4, &miss);
1500  __ bind(&miss);
1501  PropertyAccessCompiler::TailCallBuiltin(
1502  masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
1503 }
1504 
1505 
1506 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
1507  // The displacement is the offset of the last parameter (if any)
1508  // relative to the frame pointer.
1509  const int kDisplacement =
1510  StandardFrameConstants::kCallerSPOffset - kPointerSize;
1513 
1514  // Check that the key is a smi.
1515  Label slow;
1516  __ JumpIfNotSmi(r1, &slow);
1517 
1518  // Check if the calling frame is an arguments adaptor frame.
1519  Label adaptor;
1520  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1521  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
1522  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1523  __ b(eq, &adaptor);
1524 
1525  // Check index against formal parameters count limit passed in
1526  // through register r0. Use unsigned comparison to get negative
1527  // check for free.
1528  __ cmp(r1, r0);
1529  __ b(hs, &slow);
1530 
1531  // Read the argument from the stack and return it.
1532  __ sub(r3, r0, r1);
1533  __ add(r3, fp, Operand::PointerOffsetFromSmiKey(r3));
1534  __ ldr(r0, MemOperand(r3, kDisplacement));
1535  __ Jump(lr);
1536 
1537  // Arguments adaptor case: Check index against actual arguments
1538  // limit found in the arguments adaptor frame. Use unsigned
1539  // comparison to get negative check for free.
1540  __ bind(&adaptor);
1541  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
1542  __ cmp(r1, r0);
1543  __ b(cs, &slow);
1544 
1545  // Read the argument from the adaptor frame and return it.
1546  __ sub(r3, r0, r1);
1547  __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r3));
1548  __ ldr(r0, MemOperand(r3, kDisplacement));
1549  __ Jump(lr);
1550 
1551  // Slow-case: Handle non-smi or out-of-bounds access to arguments
1552  // by calling the runtime system.
1553  __ bind(&slow);
1554  __ push(r1);
1555  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
1556 }
1557 
1558 
1559 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
1560  // sp[0] : number of parameters
1561  // sp[4] : receiver displacement
1562  // sp[8] : function
1563 
1564  // Check if the calling frame is an arguments adaptor frame.
1565  Label runtime;
1566  __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1567  __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
1568  __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1569  __ b(ne, &runtime);
1570 
1571  // Patch the arguments.length and the parameters pointer in the current frame.
1572  __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
1573  __ str(r2, MemOperand(sp, 0 * kPointerSize));
1574  __ add(r3, r3, Operand(r2, LSL, 1));
1575  __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
1576  __ str(r3, MemOperand(sp, 1 * kPointerSize));
1577 
1578  __ bind(&runtime);
1579  __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
1580 }
1581 
1582 
1583 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
1584  // Stack layout:
1585  // sp[0] : number of parameters (tagged)
1586  // sp[4] : address of receiver argument
1587  // sp[8] : function
1588  // Registers used over whole function:
1589  // r6 : allocated object (tagged)
1590  // r9 : mapped parameter count (tagged)
1591 
1592  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
1593  // r1 = parameter count (tagged)
1594 
1595  // Check if the calling frame is an arguments adaptor frame.
1596  Label runtime;
1597  Label adaptor_frame, try_allocate;
1601  __ b(eq, &adaptor_frame);
1602 
1603  // No adaptor, parameter count = argument count.
1604  __ mov(r2, r1);
1605  __ b(&try_allocate);
1606 
1607  // We have an adaptor frame. Patch the parameters pointer.
1608  __ bind(&adaptor_frame);
1610  __ add(r3, r3, Operand(r2, LSL, 1));
1612  __ str(r3, MemOperand(sp, 1 * kPointerSize));
1613 
1614  // r1 = parameter count (tagged)
1615  // r2 = argument count (tagged)
1616  // Compute the mapped parameter count = min(r1, r2) in r1.
1617  __ cmp(r1, Operand(r2));
1618  __ mov(r1, Operand(r2), LeaveCC, gt);
1619 
1620  __ bind(&try_allocate);
1621 
1622  // Compute the sizes of backing store, parameter map, and arguments object.
 1623  // 1. Parameter map: has 2 extra words containing the context and backing store.
1624  const int kParameterMapHeaderSize =
1626  // If there are no mapped parameters, we do not need the parameter_map.
1627  __ cmp(r1, Operand(Smi::FromInt(0)));
1628  __ mov(r9, Operand::Zero(), LeaveCC, eq);
1629  __ mov(r9, Operand(r1, LSL, 1), LeaveCC, ne);
1630  __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);
1631 
1632  // 2. Backing store.
1633  __ add(r9, r9, Operand(r2, LSL, 1));
1634  __ add(r9, r9, Operand(FixedArray::kHeaderSize));
1635 
1636  // 3. Arguments object.
1637  __ add(r9, r9, Operand(Heap::kSloppyArgumentsObjectSize));
1638 
1639  // Do the allocation of all three objects in one go.
1640  __ Allocate(r9, r0, r3, r4, &runtime, TAG_OBJECT);
1641 
1642  // r0 = address of new object(s) (tagged)
1643  // r2 = argument count (smi-tagged)
1644  // Get the arguments boilerplate from the current native context into r4.
1645  const int kNormalOffset =
1647  const int kAliasedOffset =
1649 
1652  __ cmp(r1, Operand::Zero());
1653  __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
1654  __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
1655 
1656  // r0 = address of new object (tagged)
1657  // r1 = mapped parameter count (tagged)
1658  // r2 = argument count (smi-tagged)
1659  // r4 = address of arguments map (tagged)
1661  __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
1664 
1665  // Set up the callee in-object property.
1667  __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
1668  __ AssertNotSmi(r3);
1669  const int kCalleeOffset = JSObject::kHeaderSize +
1671  __ str(r3, FieldMemOperand(r0, kCalleeOffset));
1672 
1673  // Use the length (smi tagged) and set that as an in-object property too.
1674  __ AssertSmi(r2);
1676  const int kLengthOffset = JSObject::kHeaderSize +
1678  __ str(r2, FieldMemOperand(r0, kLengthOffset));
1679 
1680  // Set up the elements pointer in the allocated arguments object.
1681  // If we allocated a parameter map, r4 will point there, otherwise
1682  // it will point to the backing store.
1683  __ add(r4, r0, Operand(Heap::kSloppyArgumentsObjectSize));
1685 
1686  // r0 = address of new object (tagged)
1687  // r1 = mapped parameter count (tagged)
1688  // r2 = argument count (tagged)
1689  // r4 = address of parameter map or backing store (tagged)
1690  // Initialize parameter map. If there are no mapped arguments, we're done.
1691  Label skip_parameter_map;
1692  __ cmp(r1, Operand(Smi::FromInt(0)));
1693  // Move backing store address to r3, because it is
1694  // expected there when filling in the unmapped arguments.
1695  __ mov(r3, r4, LeaveCC, eq);
1696  __ b(eq, &skip_parameter_map);
1697 
1698  __ LoadRoot(r6, Heap::kSloppyArgumentsElementsMapRootIndex);
1700  __ add(r6, r1, Operand(Smi::FromInt(2)));
1703  __ add(r6, r4, Operand(r1, LSL, 1));
1704  __ add(r6, r6, Operand(kParameterMapHeaderSize));
1706 
1707  // Copy the parameter slots and the holes in the arguments.
1708  // We need to fill in mapped_parameter_count slots. They index the context,
1709  // where parameters are stored in reverse order, at
1710  // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
 1711  // The mapped parameters thus need to get indices
1712  // MIN_CONTEXT_SLOTS+parameter_count-1 ..
1713  // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
1714  // We loop from right to left.
1715  Label parameters_loop, parameters_test;
1716  __ mov(r6, r1);
1717  __ ldr(r9, MemOperand(sp, 0 * kPointerSize));
1718  __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
1719  __ sub(r9, r9, Operand(r1));
1720  __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
1721  __ add(r3, r4, Operand(r6, LSL, 1));
1722  __ add(r3, r3, Operand(kParameterMapHeaderSize));
1723 
1724  // r6 = loop variable (tagged)
1725  // r1 = mapping index (tagged)
1726  // r3 = address of backing store (tagged)
1727  // r4 = address of parameter map (tagged), which is also the address of new
1728  // object + Heap::kSloppyArgumentsObjectSize (tagged)
 1729  // r0 = temporary scratch (among others, for address calculation)
1730  // r5 = the hole value
1731  __ jmp(&parameters_test);
1732 
1733  __ bind(&parameters_loop);
1734  __ sub(r6, r6, Operand(Smi::FromInt(1)));
1735  __ mov(r0, Operand(r6, LSL, 1));
1736  __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
1737  __ str(r9, MemOperand(r4, r0));
1738  __ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
1739  __ str(r5, MemOperand(r3, r0));
1740  __ add(r9, r9, Operand(Smi::FromInt(1)));
1741  __ bind(&parameters_test);
1742  __ cmp(r6, Operand(Smi::FromInt(0)));
1743  __ b(ne, &parameters_loop);
1744 
1745  // Restore r0 = new object (tagged)
1746  __ sub(r0, r4, Operand(Heap::kSloppyArgumentsObjectSize));
1747 
1748  __ bind(&skip_parameter_map);
1749  // r0 = address of new object (tagged)
1750  // r2 = argument count (tagged)
1751  // r3 = address of backing store (tagged)
1752  // r5 = scratch
1753  // Copy arguments header and remaining slots (if there are any).
1754  __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex);
1757 
1758  Label arguments_loop, arguments_test;
1759  __ mov(r9, r1);
1760  __ ldr(r4, MemOperand(sp, 1 * kPointerSize));
1761  __ sub(r4, r4, Operand(r9, LSL, 1));
1762  __ jmp(&arguments_test);
1763 
1764  __ bind(&arguments_loop);
1765  __ sub(r4, r4, Operand(kPointerSize));
1766  __ ldr(r6, MemOperand(r4, 0));
1767  __ add(r5, r3, Operand(r9, LSL, 1));
1769  __ add(r9, r9, Operand(Smi::FromInt(1)));
1770 
1771  __ bind(&arguments_test);
1772  __ cmp(r9, Operand(r2));
1773  __ b(lt, &arguments_loop);
1774 
1775  // Return and remove the on-stack parameters.
1776  __ add(sp, sp, Operand(3 * kPointerSize));
1777  __ Ret();
1778 
1779  // Do the runtime call to allocate the arguments object.
1780  // r0 = address of new object (tagged)
1781  // r2 = argument count (tagged)
1782  __ bind(&runtime);
1783  __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
1784  __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
1785 }
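// Hypothetical helper mirroring the three-part size computation above
// (parameter map, backing store, arguments object). The constants are
// stand-ins for the real Heap/FixedArray layout values; this is a sketch,
// not V8 API.
static int SloppyArgumentsAllocationSize(int parameter_count,
                                         int argument_count, int pointer_size,
                                         int parameter_map_header_size,
                                         int fixed_array_header_size,
                                         int sloppy_arguments_object_size) {
  int mapped_count =
      parameter_count < argument_count ? parameter_count : argument_count;
  int size = 0;
  // 1. Parameter map (only when there are mapped parameters); it carries two
  //    extra words for the context and the backing store.
  if (mapped_count > 0) {
    size += parameter_map_header_size + mapped_count * pointer_size;
  }
  // 2. Backing store for all arguments.
  size += fixed_array_header_size + argument_count * pointer_size;
  // 3. The arguments object itself.
  size += sloppy_arguments_object_size;
  return size;
}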
1786 
1787 
1788 void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
1789  // Return address is in lr.
1790  Label slow;
1791 
1792  Register receiver = LoadDescriptor::ReceiverRegister();
1793  Register key = LoadDescriptor::NameRegister();
1794 
 1795  // Check that the key is an array index, that is, a Uint32.
1796  __ NonNegativeSmiTst(key);
1797  __ b(ne, &slow);
1798 
1799  // Everything is fine, call runtime.
1800  __ Push(receiver, key); // Receiver, key.
1801 
1802  // Perform tail call to the entry.
1803  __ TailCallExternalReference(
1804  ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
1805  masm->isolate()),
1806  2, 1);
1807 
1808  __ bind(&slow);
1809  PropertyAccessCompiler::TailCallBuiltin(
1810  masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
1811 }
1812 
1813 
1814 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
1815  // sp[0] : number of parameters
1816  // sp[4] : receiver displacement
1817  // sp[8] : function
1818  // Check if the calling frame is an arguments adaptor frame.
1819  Label adaptor_frame, try_allocate, runtime;
1823  __ b(eq, &adaptor_frame);
1824 
1825  // Get the length from the frame.
1826  __ ldr(r1, MemOperand(sp, 0));
1827  __ b(&try_allocate);
1828 
1829  // Patch the arguments.length and the parameters pointer.
1830  __ bind(&adaptor_frame);
1832  __ str(r1, MemOperand(sp, 0));
1833  __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1));
1835  __ str(r3, MemOperand(sp, 1 * kPointerSize));
1836 
1837  // Try the new space allocation. Start out with computing the size
1838  // of the arguments object and the elements array in words.
1839  Label add_arguments_object;
1840  __ bind(&try_allocate);
1841  __ SmiUntag(r1, SetCC);
1842  __ b(eq, &add_arguments_object);
1843  __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
1844  __ bind(&add_arguments_object);
1846 
1847  // Do the allocation of both objects in one go.
1848  __ Allocate(r1, r0, r2, r3, &runtime,
1849  static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
1850 
1851  // Get the arguments boilerplate from the current native context.
1854  __ ldr(r4, MemOperand(
1856 
1858  __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
1861 
1862  // Get the length (smi tagged) and set that as an in-object property too.
1864  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
1865  __ AssertSmi(r1);
1868 
1869  // If there are no actual arguments, we're done.
1870  Label done;
1871  __ cmp(r1, Operand::Zero());
1872  __ b(eq, &done);
1873 
1874  // Get the parameters pointer from the stack.
1875  __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
1876 
1877  // Set up the elements pointer in the allocated arguments object and
1878  // initialize the header in the elements fixed array.
1879  __ add(r4, r0, Operand(Heap::kStrictArgumentsObjectSize));
1881  __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
1884  __ SmiUntag(r1);
1885 
1886  // Copy the fixed array slots.
1887  Label loop;
1888  // Set up r4 to point to the first array slot.
1889  __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
1890  __ bind(&loop);
1891  // Pre-decrement r2 with kPointerSize on each iteration.
1892  // Pre-decrement in order to skip receiver.
1894  // Post-increment r4 with kPointerSize on each iteration.
1896  __ sub(r1, r1, Operand(1));
1897  __ cmp(r1, Operand::Zero());
1898  __ b(ne, &loop);
1899 
1900  // Return and remove the on-stack parameters.
1901  __ bind(&done);
1902  __ add(sp, sp, Operand(3 * kPointerSize));
1903  __ Ret();
1904 
1905  // Do the runtime call to allocate the arguments object.
1906  __ bind(&runtime);
1907  __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
1908 }
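// Sketch of the size computed by try_allocate above, in words. Constants are
// placeholders for the real layout values; when there are no arguments the
// elements FixedArray is skipped entirely.
static int StrictArgumentsSizeInWords(int argument_count,
                                      int fixed_array_header_words,
                                      int strict_arguments_object_words) {
  int size_in_words = strict_arguments_object_words;
  if (argument_count > 0) {
    size_in_words += fixed_array_header_words + argument_count;
  }
  return size_in_words;
}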
1909 
1910 
1911 void RegExpExecStub::Generate(MacroAssembler* masm) {
 1912  // Just jump directly to the runtime if native RegExp is not selected at
 1913  // compile time, or if the regexp entry in generated code has been turned
 1914  // off, either by a runtime switch or at compilation.
1915 #ifdef V8_INTERPRETED_REGEXP
1916  __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
1917 #else // V8_INTERPRETED_REGEXP
1918 
1919  // Stack frame on entry.
1920  // sp[0]: last_match_info (expected JSArray)
1921  // sp[4]: previous index
1922  // sp[8]: subject string
1923  // sp[12]: JSRegExp object
1924 
1925  const int kLastMatchInfoOffset = 0 * kPointerSize;
1926  const int kPreviousIndexOffset = 1 * kPointerSize;
1927  const int kSubjectOffset = 2 * kPointerSize;
1928  const int kJSRegExpOffset = 3 * kPointerSize;
1929 
1930  Label runtime;
 1931  // Allocation of registers for this function. These are callee-saved
1932  // registers and will be preserved by the call to the native RegExp code, as
1933  // this code is called using the normal C calling convention. When calling
1934  // directly from generated code the native RegExp code will not do a GC and
 1935  // therefore the contents of these registers are safe to use after the call.
1936  Register subject = r4;
1937  Register regexp_data = r5;
1938  Register last_match_info_elements = no_reg; // will be r6;
1939 
1940  // Ensure that a RegExp stack is allocated.
1941  ExternalReference address_of_regexp_stack_memory_address =
1942  ExternalReference::address_of_regexp_stack_memory_address(isolate());
1943  ExternalReference address_of_regexp_stack_memory_size =
1944  ExternalReference::address_of_regexp_stack_memory_size(isolate());
1945  __ mov(r0, Operand(address_of_regexp_stack_memory_size));
1946  __ ldr(r0, MemOperand(r0, 0));
1947  __ cmp(r0, Operand::Zero());
1948  __ b(eq, &runtime);
1949 
1950  // Check that the first argument is a JSRegExp object.
1951  __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
1952  __ JumpIfSmi(r0, &runtime);
1953  __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
1954  __ b(ne, &runtime);
1955 
1956  // Check that the RegExp has been compiled (data contains a fixed array).
1957  __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
1958  if (FLAG_debug_code) {
1959  __ SmiTst(regexp_data);
1960  __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
1961  __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
1962  __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
1963  }
1964 
1965  // regexp_data: RegExp data (FixedArray)
1966  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
1967  __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
1968  __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
1969  __ b(ne, &runtime);
1970 
1971  // regexp_data: RegExp data (FixedArray)
 1972  // Check that the number of captures fits in the static offsets vector buffer.
1973  __ ldr(r2,
1975  // Check (number_of_captures + 1) * 2 <= offsets vector size
1976  // Or number_of_captures * 2 <= offsets vector size - 2
1977  // Multiplying by 2 comes for free since r2 is smi-tagged.
1978  STATIC_ASSERT(kSmiTag == 0);
1981  __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
1982  __ b(hi, &runtime);
1983 
1984  // Reset offset for possibly sliced string.
1985  __ mov(r9, Operand::Zero());
1986  __ ldr(subject, MemOperand(sp, kSubjectOffset));
1987  __ JumpIfSmi(subject, &runtime);
1988  __ mov(r3, subject); // Make a copy of the original subject string.
1989  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
1991  // subject: subject string
1992  // r3: subject string
1993  // r0: subject string instance type
1994  // regexp_data: RegExp data (FixedArray)
1995  // Handle subject string according to its encoding and representation:
1996  // (1) Sequential string? If yes, go to (5).
1997  // (2) Anything but sequential or cons? If yes, go to (6).
1998  // (3) Cons string. If the string is flat, replace subject with first string.
 1999  // Otherwise bail out.
2000  // (4) Is subject external? If yes, go to (7).
2001  // (5) Sequential string. Load regexp code according to encoding.
2002  // (E) Carry on.
2003  /// [...]
2004 
2005  // Deferred code at the end of the stub:
2006  // (6) Not a long external string? If yes, go to (8).
2007  // (7) External string. Make it, offset-wise, look like a sequential string.
2008  // Go to (5).
2009  // (8) Short external string or not a string? If yes, bail out to runtime.
2010  // (9) Sliced string. Replace subject with parent. Go to (4).
2011 
2012  Label seq_string /* 5 */, external_string /* 7 */,
2013  check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
2014  not_long_external /* 8 */;
2015 
2016  // (1) Sequential string? If yes, go to (5).
2017  __ and_(r1,
2018  r0,
2019  Operand(kIsNotStringMask |
2022  SetCC);
2024  __ b(eq, &seq_string); // Go to (5).
2025 
2026  // (2) Anything but sequential or cons? If yes, go to (6).
2031  __ cmp(r1, Operand(kExternalStringTag));
2032  __ b(ge, &not_seq_nor_cons); // Go to (6).
2033 
2034  // (3) Cons string. Check that it's flat.
2035  // Replace subject with first string and reload instance type.
2037  __ CompareRoot(r0, Heap::kempty_stringRootIndex);
2038  __ b(ne, &runtime);
2039  __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
2040 
2041  // (4) Is subject external? If yes, go to (7).
2042  __ bind(&check_underlying);
2043  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
2046  __ tst(r0, Operand(kStringRepresentationMask));
2047  // The underlying external string is never a short external string.
2050  __ b(ne, &external_string); // Go to (7).
2051 
2052  // (5) Sequential string. Load regexp code according to encoding.
2053  __ bind(&seq_string);
2054  // subject: sequential subject string (or look-alike, external string)
2055  // r3: original subject string
2056  // Load previous index and check range before r3 is overwritten. We have to
2057  // use r3 instead of subject here because subject might have been only made
2058  // to look like a sequential string when it actually is an external string.
2059  __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
2060  __ JumpIfNotSmi(r1, &runtime);
2062  __ cmp(r3, Operand(r1));
2063  __ b(ls, &runtime);
2064  __ SmiUntag(r1);
2065 
2068  __ and_(r0, r0, Operand(kStringEncodingMask));
2069  __ mov(r3, Operand(r0, ASR, 2), SetCC);
2071  ne);
2072  __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
2073 
2074  // (E) Carry on. String handling is done.
2075  // r6: irregexp code
2076  // Check that the irregexp code has been generated for the actual string
 2077  // encoding. If it has, the field contains a code object; otherwise it contains
2078  // a smi (code flushing support).
2079  __ JumpIfSmi(r6, &runtime);
2080 
2081  // r1: previous index
2082  // r3: encoding of subject string (1 if one_byte, 0 if two_byte);
2083  // r6: code
2084  // subject: Subject string
2085  // regexp_data: RegExp data (FixedArray)
2086  // All checks done. Now push arguments for native regexp code.
2087  __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r0, r2);
2088 
2089  // Isolates: note we add an additional parameter here (isolate pointer).
2090  const int kRegExpExecuteArguments = 9;
2091  const int kParameterRegisters = 4;
2092  __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
2093 
2094  // Stack pointer now points to cell where return address is to be written.
2095  // Arguments are before that on the stack or in registers.
2096 
2097  // Argument 9 (sp[20]): Pass current isolate address.
2098  __ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
2099  __ str(r0, MemOperand(sp, 5 * kPointerSize));
2100 
2101  // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
2102  __ mov(r0, Operand(1));
2103  __ str(r0, MemOperand(sp, 4 * kPointerSize));
2104 
2105  // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
2106  __ mov(r0, Operand(address_of_regexp_stack_memory_address));
2107  __ ldr(r0, MemOperand(r0, 0));
2108  __ mov(r2, Operand(address_of_regexp_stack_memory_size));
2109  __ ldr(r2, MemOperand(r2, 0));
2110  __ add(r0, r0, Operand(r2));
2111  __ str(r0, MemOperand(sp, 3 * kPointerSize));
2112 
2113  // Argument 6: Set the number of capture registers to zero to force global
2114  // regexps to behave as non-global. This does not affect non-global regexps.
2115  __ mov(r0, Operand::Zero());
2116  __ str(r0, MemOperand(sp, 2 * kPointerSize));
2117 
2118  // Argument 5 (sp[4]): static offsets vector buffer.
2119  __ mov(r0,
2120  Operand(ExternalReference::address_of_static_offsets_vector(
2121  isolate())));
2122  __ str(r0, MemOperand(sp, 1 * kPointerSize));
2123 
 2124  // For arguments 4 and 3, get the string length, calculate the start of the
 2125  // string data, and the shift of the index (0 for one-byte and 1 for two-byte).
2126  __ add(r7, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
2127  __ eor(r3, r3, Operand(1));
2128  // Load the length from the original subject string from the previous stack
2129  // frame. Therefore we have to use fp, which points exactly to two pointer
2130  // sizes below the previous sp. (Because creating a new stack frame pushes
2131  // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
2132  __ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
2133  // If slice offset is not 0, load the length from the original sliced string.
2134  // Argument 4, r3: End of string data
2135  // Argument 3, r2: Start of string data
2136  // Prepare start and end index of the input.
2137  __ add(r9, r7, Operand(r9, LSL, r3));
2138  __ add(r2, r9, Operand(r1, LSL, r3));
2139 
2140  __ ldr(r7, FieldMemOperand(subject, String::kLengthOffset));
2141  __ SmiUntag(r7);
2142  __ add(r3, r9, Operand(r7, LSL, r3));
2143 
2144  // Argument 2 (r1): Previous index.
2145  // Already there
2146 
2147  // Argument 1 (r0): Subject string.
2148  __ mov(r0, subject);
2149 
2150  // Locate the code entry and call it.
2151  __ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
2152  DirectCEntryStub stub(isolate());
2153  stub.GenerateCall(masm, r6);
2154 
2155  __ LeaveExitFrame(false, no_reg, true);
2156 
2157  last_match_info_elements = r6;
2158 
2159  // r0: result
2160  // subject: subject string (callee saved)
2161  // regexp_data: RegExp data (callee saved)
2162  // last_match_info_elements: Last match info elements (callee saved)
2163  // Check the result.
2164  Label success;
2165  __ cmp(r0, Operand(1));
2166  // We expect exactly one result since we force the called regexp to behave
2167  // as non-global.
2168  __ b(eq, &success);
2169  Label failure;
2170  __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
2171  __ b(eq, &failure);
 2173  // If the result is not an exception, it can only be a retry. Handle that in the runtime system.
2174  __ b(ne, &runtime);
 2175  // The result must now be an exception. If there is no pending exception
 2176  // already, a stack overflow (on the backtrack stack) was detected in RegExp
 2177  // code but the exception has not been created yet. Handle that in the runtime system.
2178  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
2179  __ mov(r1, Operand(isolate()->factory()->the_hole_value()));
2180  __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
2181  isolate())));
2182  __ ldr(r0, MemOperand(r2, 0));
2183  __ cmp(r0, r1);
2184  __ b(eq, &runtime);
2185 
2186  __ str(r1, MemOperand(r2, 0)); // Clear pending exception.
2187 
2188  // Check if the exception is a termination. If so, throw as uncatchable.
2189  __ CompareRoot(r0, Heap::kTerminationExceptionRootIndex);
2190 
2191  Label termination_exception;
2192  __ b(eq, &termination_exception);
2193 
2194  __ Throw(r0);
2195 
2196  __ bind(&termination_exception);
2197  __ ThrowUncatchable(r0);
2198 
2199  __ bind(&failure);
2200  // For failure and exception return null.
2201  __ mov(r0, Operand(isolate()->factory()->null_value()));
2202  __ add(sp, sp, Operand(4 * kPointerSize));
2203  __ Ret();
2204 
2205  // Process the result from the native regexp code.
2206  __ bind(&success);
2207  __ ldr(r1,
2209  // Calculate number of capture registers (number_of_captures + 1) * 2.
2210  // Multiplying by 2 comes for free since r1 is smi-tagged.
2211  STATIC_ASSERT(kSmiTag == 0);
2213  __ add(r1, r1, Operand(2)); // r1 was a smi.
2214 
2215  __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
2216  __ JumpIfSmi(r0, &runtime);
2217  __ CompareObjectType(r0, r2, r2, JS_ARRAY_TYPE);
2218  __ b(ne, &runtime);
2219  // Check that the JSArray is in fast case.
2220  __ ldr(last_match_info_elements,
2222  __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
2223  __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
2224  __ b(ne, &runtime);
2225  // Check that the last match info has space for the capture registers and the
2226  // additional information.
2227  __ ldr(r0,
2228  FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
2229  __ add(r2, r1, Operand(RegExpImpl::kLastMatchOverhead));
2230  __ cmp(r2, Operand::SmiUntag(r0));
2231  __ b(gt, &runtime);
2232 
2233  // r1: number of capture registers
2234  // r4: subject string
2235  // Store the capture count.
2236  __ SmiTag(r2, r1);
2237  __ str(r2, FieldMemOperand(last_match_info_elements,
2239  // Store last subject and last input.
2240  __ str(subject,
2241  FieldMemOperand(last_match_info_elements,
2243  __ mov(r2, subject);
2244  __ RecordWriteField(last_match_info_elements,
2246  subject,
2247  r3,
2249  kDontSaveFPRegs);
2250  __ mov(subject, r2);
2251  __ str(subject,
2252  FieldMemOperand(last_match_info_elements,
2254  __ RecordWriteField(last_match_info_elements,
2256  subject,
2257  r3,
2259  kDontSaveFPRegs);
2260 
2261  // Get the static offsets vector filled by the native regexp code.
2262  ExternalReference address_of_static_offsets_vector =
2263  ExternalReference::address_of_static_offsets_vector(isolate());
2264  __ mov(r2, Operand(address_of_static_offsets_vector));
2265 
2266  // r1: number of capture registers
2267  // r2: offsets vector
2268  Label next_capture, done;
2269  // Capture register counter starts from number of capture registers and
 2270  // counts down until wrapping after zero.
2271  __ add(r0,
2272  last_match_info_elements,
2274  __ bind(&next_capture);
2275  __ sub(r1, r1, Operand(1), SetCC);
2276  __ b(mi, &done);
2277  // Read the value from the static offsets vector buffer.
2279  // Store the smi value in the last match info.
2280  __ SmiTag(r3);
2282  __ jmp(&next_capture);
2283  __ bind(&done);
2284 
2285  // Return last match info.
2286  __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
2287  __ add(sp, sp, Operand(4 * kPointerSize));
2288  __ Ret();
2289 
2290  // Do the runtime call to execute the regexp.
2291  __ bind(&runtime);
2292  __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
2293 
2294  // Deferred code for string handling.
2295  // (6) Not a long external string? If yes, go to (8).
2296  __ bind(&not_seq_nor_cons);
2297  // Compare flags are still set.
2298  __ b(gt, &not_long_external); // Go to (8).
2299 
2300  // (7) External string. Make it, offset-wise, look like a sequential string.
2301  __ bind(&external_string);
2302  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
2304  if (FLAG_debug_code) {
2305  // Assert that we do not have a cons or slice (indirect strings) here.
2306  // Sequential strings have already been ruled out.
2307  __ tst(r0, Operand(kIsIndirectStringMask));
2308  __ Assert(eq, kExternalStringExpectedButNotFound);
2309  }
2310  __ ldr(subject,
2312  // Move the pointer so that offset-wise, it looks like a sequential string.
2314  __ sub(subject,
2315  subject,
2317  __ jmp(&seq_string); // Go to (5).
2318 
2319  // (8) Short external string or not a string? If yes, bail out to runtime.
2320  __ bind(&not_long_external);
2323  __ b(ne, &runtime);
2324 
2325  // (9) Sliced string. Replace subject with parent. Go to (4).
2326  // Load offset into r9 and replace subject string with parent.
2328  __ SmiUntag(r9);
2329  __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
2330  __ jmp(&check_underlying); // Go to (4).
2331 #endif // V8_INTERPRETED_REGEXP
2332 }
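// Very rough sketch of the subject-string dispatch laid out in steps (1)-(9)
// above. Types and helpers are hypothetical; flatness and short-external
// checks are omitted, and the real code branches on instance-type bits.
enum StringRep { kSeqString, kConsString, kSlicedString, kExtString, kOtherRep };
struct Str {
  StringRep rep;
  Str* first;    // cons: first part
  Str* parent;   // sliced: parent string
  int offset;    // sliced: start offset in the parent
};

static Str* UnwrapForRegExp(Str* subject, int* slice_offset, bool* go_runtime) {
  for (;;) {
    switch (subject->rep) {
      case kSeqString:
      case kExtString:                 // treated, offset-wise, as sequential
        return subject;
      case kConsString:                // only flat cons strings are accepted
        subject = subject->first;
        break;
      case kSlicedString:              // use the parent plus its offset
        *slice_offset += subject->offset;
        subject = subject->parent;
        break;
      default:                         // short external / not a string
        *go_runtime = true;
        return subject;
    }
  }
}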
2333 
2334 
2335 static void GenerateRecordCallTarget(MacroAssembler* masm) {
2336  // Cache the called function in a feedback vector slot. Cache states
2337  // are uninitialized, monomorphic (indicated by a JSFunction), and
2338  // megamorphic.
2339  // r0 : number of arguments to the construct function
2340  // r1 : the function to call
2341  // r2 : Feedback vector
2342  // r3 : slot in feedback vector (Smi)
2343  Label initialize, done, miss, megamorphic, not_array_function;
2344 
2346  masm->isolate()->heap()->megamorphic_symbol());
2348  masm->isolate()->heap()->uninitialized_symbol());
2349 
2350  // Load the cache state into r4.
2351  __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
2353 
2354  // A monomorphic cache hit or an already megamorphic state: invoke the
2355  // function without changing the state.
2356  __ cmp(r4, r1);
2357  __ b(eq, &done);
2358 
2359  if (!FLAG_pretenuring_call_new) {
2360  // If we came here, we need to see if we are the array function.
 2361  // If we didn't have a matching function, and we didn't find the megamorphic
 2362  // sentinel, then we have in the slot either some other function or an
 2363  // AllocationSite. Do a map check on the object in r4.
2364  __ ldr(r5, FieldMemOperand(r4, 0));
2365  __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
2366  __ b(ne, &miss);
2367 
2368  // Make sure the function is the Array() function
2369  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
2370  __ cmp(r1, r4);
2371  __ b(ne, &megamorphic);
2372  __ jmp(&done);
2373  }
2374 
2375  __ bind(&miss);
2376 
 2377  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
2378  // megamorphic.
2379  __ CompareRoot(r4, Heap::kUninitializedSymbolRootIndex);
2380  __ b(eq, &initialize);
2381  // MegamorphicSentinel is an immortal immovable object (undefined) so no
2382  // write-barrier is needed.
2383  __ bind(&megamorphic);
2384  __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
2385  __ LoadRoot(ip, Heap::kMegamorphicSymbolRootIndex);
2387  __ jmp(&done);
2388 
2389  // An uninitialized cache is patched with the function
2390  __ bind(&initialize);
2391 
2392  if (!FLAG_pretenuring_call_new) {
2393  // Make sure the function is the Array() function
2394  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
2395  __ cmp(r1, r4);
2396  __ b(ne, &not_array_function);
2397 
 2398  // The target function is the Array constructor.
2399  // Create an AllocationSite if we don't already have it, store it in the
2400  // slot.
2401  {
2402  FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2403 
2404  // Arguments register must be smi-tagged to call out.
2405  __ SmiTag(r0);
2406  __ Push(r3, r2, r1, r0);
2407 
2408  CreateAllocationSiteStub create_stub(masm->isolate());
2409  __ CallStub(&create_stub);
2410 
2411  __ Pop(r3, r2, r1, r0);
2412  __ SmiUntag(r0);
2413  }
2414  __ b(&done);
2415 
2416  __ bind(&not_array_function);
2417  }
2418 
2419  __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
2420  __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2421  __ str(r1, MemOperand(r4, 0));
2422 
2423  __ Push(r4, r2, r1);
2424  __ RecordWrite(r2, r4, r1, kLRHasNotBeenSaved, kDontSaveFPRegs,
2426  __ Pop(r4, r2, r1);
2427 
2428  __ bind(&done);
2429 }
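// The feedback-slot state machine implemented above, as a hypothetical sketch.
// In the real code the slot holds the JSFunction, an AllocationSite (for the
// Array() function), or one of two sentinel symbols; here an enum stands in.
enum FeedbackState { kUninitialized, kMonomorphic, kMegamorphic };

static FeedbackState UpdateCallFeedback(FeedbackState state, bool same_target) {
  if (same_target || state == kMegamorphic) {
    return state;                 // Cache hit or already megamorphic: no change.
  }
  if (state == kUninitialized) {
    return kMonomorphic;          // Record the target (or an AllocationSite).
  }
  return kMegamorphic;            // A monomorphic miss goes megamorphic.
}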
2430 
2431 
2432 static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
2433  // Do not transform the receiver for strict mode functions.
2436  __ tst(r4, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
2437  kSmiTagSize)));
2438  __ b(ne, cont);
2439 
 2440  // Do not transform the receiver for native (compiler hints already in r4).
2441  __ tst(r4, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
2442  __ b(ne, cont);
2443 }
2444 
2445 
2446 static void EmitSlowCase(MacroAssembler* masm,
2447  int argc,
2448  Label* non_function) {
2449  // Check for function proxy.
2450  __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
2451  __ b(ne, non_function);
2452  __ push(r1); // put proxy as additional argument
2453  __ mov(r0, Operand(argc + 1, RelocInfo::NONE32));
2454  __ mov(r2, Operand::Zero());
2455  __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY);
2456  {
2457  Handle<Code> adaptor =
2458  masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
2459  __ Jump(adaptor, RelocInfo::CODE_TARGET);
2460  }
2461 
2462  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
2463  // of the original receiver from the call site).
2464  __ bind(non_function);
2465  __ str(r1, MemOperand(sp, argc * kPointerSize));
2466  __ mov(r0, Operand(argc)); // Set up the number of arguments.
2467  __ mov(r2, Operand::Zero());
2468  __ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION);
2469  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2471 }
2472 
2473 
2474 static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
2475  // Wrap the receiver and patch it back onto the stack.
2476  { FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
2477  __ Push(r1, r3);
2478  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
2479  __ pop(r1);
2480  }
2481  __ str(r0, MemOperand(sp, argc * kPointerSize));
2482  __ jmp(cont);
2483 }
2484 
2485 
2486 static void CallFunctionNoFeedback(MacroAssembler* masm,
2487  int argc, bool needs_checks,
2488  bool call_as_method) {
2489  // r1 : the function to call
2490  Label slow, non_function, wrap, cont;
2491 
2492  if (needs_checks) {
2493  // Check that the function is really a JavaScript function.
2494  // r1: pushed function (to be verified)
2495  __ JumpIfSmi(r1, &non_function);
2496 
2497  // Goto slow case if we do not have a function.
2498  __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
2499  __ b(ne, &slow);
2500  }
2501 
2502  // Fast-case: Invoke the function now.
2503  // r1: pushed function
2504  ParameterCount actual(argc);
2505 
2506  if (call_as_method) {
2507  if (needs_checks) {
2508  EmitContinueIfStrictOrNative(masm, &cont);
2509  }
2510 
2511  // Compute the receiver in sloppy mode.
2512  __ ldr(r3, MemOperand(sp, argc * kPointerSize));
2513 
2514  if (needs_checks) {
2515  __ JumpIfSmi(r3, &wrap);
2516  __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
2517  __ b(lt, &wrap);
2518  } else {
2519  __ jmp(&wrap);
2520  }
2521 
2522  __ bind(&cont);
2523  }
2524 
2525  __ InvokeFunction(r1, actual, JUMP_FUNCTION, NullCallWrapper());
2526 
2527  if (needs_checks) {
2528  // Slow-case: Non-function called.
2529  __ bind(&slow);
2530  EmitSlowCase(masm, argc, &non_function);
2531  }
2532 
2533  if (call_as_method) {
2534  __ bind(&wrap);
2535  EmitWrapCase(masm, argc, &cont);
2536  }
2537 }
2538 
2539 
2540 void CallFunctionStub::Generate(MacroAssembler* masm) {
2541  CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
2542 }
2543 
2544 
2545 void CallConstructStub::Generate(MacroAssembler* masm) {
2546  // r0 : number of arguments
2547  // r1 : the function to call
2548  // r2 : feedback vector
2549  // r3 : (only if r2 is not the megamorphic symbol) slot in feedback
2550  // vector (Smi)
2551  Label slow, non_function_call;
2552 
2553  // Check that the function is not a smi.
2554  __ JumpIfSmi(r1, &non_function_call);
2555  // Check that the function is a JSFunction.
2556  __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
2557  __ b(ne, &slow);
2558 
2559  if (RecordCallTarget()) {
2560  GenerateRecordCallTarget(masm);
2561 
2562  __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
2563  if (FLAG_pretenuring_call_new) {
2564  // Put the AllocationSite from the feedback vector into r2.
2565  // By adding kPointerSize we encode that we know the AllocationSite
2566  // entry is at the feedback vector slot given by r3 + 1.
2568  } else {
2569  Label feedback_register_initialized;
2570  // Put the AllocationSite from the feedback vector into r2, or undefined.
2573  __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
2574  __ b(eq, &feedback_register_initialized);
2575  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
2576  __ bind(&feedback_register_initialized);
2577  }
2578 
2579  __ AssertUndefinedOrAllocationSite(r2, r5);
2580  }
2581 
2582  // Jump to the function-specific construct stub.
2583  Register jmp_reg = r4;
2585  __ ldr(jmp_reg, FieldMemOperand(jmp_reg,
2587  __ add(pc, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
2588 
2589  // r0: number of arguments
2590  // r1: called object
2591  // r4: object type
2592  Label do_call;
2593  __ bind(&slow);
2594  __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
2595  __ b(ne, &non_function_call);
2596  __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
2597  __ jmp(&do_call);
2598 
2599  __ bind(&non_function_call);
2600  __ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
2601  __ bind(&do_call);
2602  // Set expected number of arguments to zero (not changing r0).
2603  __ mov(r2, Operand::Zero());
2604  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2606 }
2607 
2608 
2609 static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
2611  __ ldr(vector, FieldMemOperand(vector,
2613  __ ldr(vector, FieldMemOperand(vector,
2615 }
2616 
2617 
2618 void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
2619  // r1 - function
2620  // r3 - slot id
2621  Label miss;
2622  int argc = arg_count();
2623  ParameterCount actual(argc);
2624 
2625  EmitLoadTypeFeedbackVector(masm, r2);
2626 
2627  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
2628  __ cmp(r1, r4);
2629  __ b(ne, &miss);
2630 
2631  __ mov(r0, Operand(arg_count()));
2632  __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
2634 
2635  // Verify that r4 contains an AllocationSite
2637  __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
2638  __ b(ne, &miss);
2639 
2640  __ mov(r2, r4);
2641  ArrayConstructorStub stub(masm->isolate(), arg_count());
2642  __ TailCallStub(&stub);
2643 
2644  __ bind(&miss);
2645  GenerateMiss(masm);
2646 
 2647  // The slow case; we need this no matter what to complete a call after a miss.
2648  CallFunctionNoFeedback(masm,
2649  arg_count(),
2650  true,
2651  CallAsMethod());
2652 
2653  // Unreachable.
2654  __ stop("Unexpected code address");
2655 }
2656 
2657 
2658 void CallICStub::Generate(MacroAssembler* masm) {
2659  // r1 - function
2660  // r3 - slot id (Smi)
2661  Label extra_checks_or_miss, slow_start;
2662  Label slow, non_function, wrap, cont;
2663  Label have_js_function;
2664  int argc = arg_count();
2665  ParameterCount actual(argc);
2666 
2667  EmitLoadTypeFeedbackVector(masm, r2);
2668 
2669  // The checks. First, does r1 match the recorded monomorphic target?
2670  __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
2672  __ cmp(r1, r4);
2673  __ b(ne, &extra_checks_or_miss);
2674 
2675  __ bind(&have_js_function);
2676  if (CallAsMethod()) {
2677  EmitContinueIfStrictOrNative(masm, &cont);
2678  // Compute the receiver in sloppy mode.
2679  __ ldr(r3, MemOperand(sp, argc * kPointerSize));
2680 
2681  __ JumpIfSmi(r3, &wrap);
2682  __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
2683  __ b(lt, &wrap);
2684 
2685  __ bind(&cont);
2686  }
2687 
2688  __ InvokeFunction(r1, actual, JUMP_FUNCTION, NullCallWrapper());
2689 
2690  __ bind(&slow);
2691  EmitSlowCase(masm, argc, &non_function);
2692 
2693  if (CallAsMethod()) {
2694  __ bind(&wrap);
2695  EmitWrapCase(masm, argc, &cont);
2696  }
2697 
2698  __ bind(&extra_checks_or_miss);
2699  Label miss;
2700 
2701  __ CompareRoot(r4, Heap::kMegamorphicSymbolRootIndex);
2702  __ b(eq, &slow_start);
2703  __ CompareRoot(r4, Heap::kUninitializedSymbolRootIndex);
2704  __ b(eq, &miss);
2705 
2706  if (!FLAG_trace_ic) {
2707  // We are going megamorphic. If the feedback is a JSFunction, it is fine
2708  // to handle it here. More complex cases are dealt with in the runtime.
2709  __ AssertNotSmi(r4);
2710  __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
2711  __ b(ne, &miss);
2712  __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
2713  __ LoadRoot(ip, Heap::kMegamorphicSymbolRootIndex);
2715  __ jmp(&slow_start);
2716  }
2717 
2718  // We are here because tracing is on or we are going monomorphic.
2719  __ bind(&miss);
2720  GenerateMiss(masm);
2721 
2722  // the slow case
2723  __ bind(&slow_start);
2724  // Check that the function is really a JavaScript function.
2725  // r1: pushed function (to be verified)
2726  __ JumpIfSmi(r1, &non_function);
2727 
2728  // Goto slow case if we do not have a function.
2729  __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
2730  __ b(ne, &slow);
2731  __ jmp(&have_js_function);
2732 }
2733 
2734 
2735 void CallICStub::GenerateMiss(MacroAssembler* masm) {
2736  // Get the receiver of the function from the stack; 1 ~ return address.
2737  __ ldr(r4, MemOperand(sp, (arg_count() + 1) * kPointerSize));
2738 
2739  {
2740  FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2741 
2742  // Push the receiver and the function and feedback info.
2743  __ Push(r4, r1, r2, r3);
2744 
2745  // Call the entry.
2746  IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
2747  : IC::kCallIC_Customization_Miss;
2748 
2749  ExternalReference miss = ExternalReference(IC_Utility(id),
2750  masm->isolate());
2751  __ CallExternalReference(miss, 4);
2752 
 2753  // Move the result to r1 and exit the internal frame.
2754  __ mov(r1, r0);
2755  }
2756 }
2757 
2758 
2759 // StringCharCodeAtGenerator
2760 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
2761  // If the receiver is a smi trigger the non-string case.
2762  __ JumpIfSmi(object_, receiver_not_string_);
2763 
2764  // Fetch the instance type of the receiver into result register.
2767  // If the receiver is not a string trigger the non-string case.
2768  __ tst(result_, Operand(kIsNotStringMask));
2770 
2771  // If the index is non-smi trigger the non-smi case.
2772  __ JumpIfNotSmi(index_, &index_not_smi_);
2773  __ bind(&got_smi_index_);
2774 
2775  // Check for index out of range.
2777  __ cmp(ip, Operand(index_));
2779 
2780  __ SmiUntag(index_);
2781 
2783  object_,
2784  index_,
2785  result_,
2786  &call_runtime_);
2787 
2788  __ SmiTag(result_);
2789  __ bind(&exit_);
2790 }
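// Minimal sketch of the fast path above (hypothetical names): a smi receiver,
// a non-string receiver, a non-smi index or an out-of-range index all defer
// to the slow cases generated in GenerateSlow below.
static bool FastCharCodeAt(const unsigned short* chars, int length,
                           bool receiver_is_string, bool index_is_smi,
                           int index, int* char_code) {
  if (!receiver_is_string || !index_is_smi) return false;
  if (static_cast<unsigned>(index) >= static_cast<unsigned>(length)) {
    return false;  // Out of range: handled by the slow path.
  }
  *char_code = chars[index];  // The stub dispatches on the string type here.
  return true;
}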
2791 
2792 
2794  MacroAssembler* masm,
2795  const RuntimeCallHelper& call_helper) {
2796  __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
2797 
2798  // Index is not a smi.
2799  __ bind(&index_not_smi_);
2800  // If index is a heap number, try converting it to an integer.
2801  __ CheckMap(index_,
2802  result_,
2803  Heap::kHeapNumberMapRootIndex,
2806  call_helper.BeforeCall(masm);
2807  __ push(object_);
2808  __ push(index_); // Consumed by runtime conversion function.
2810  __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
2811  } else {
2813  // NumberToSmi discards numbers that are not exact integers.
2814  __ CallRuntime(Runtime::kNumberToSmi, 1);
2815  }
2816  // Save the conversion result before the pop instructions below
2817  // have a chance to overwrite it.
2818  __ Move(index_, r0);
2819  __ pop(object_);
2820  // Reload the instance type.
2823  call_helper.AfterCall(masm);
2824  // If index is still not a smi, it must be out of range.
2825  __ JumpIfNotSmi(index_, index_out_of_range_);
2826  // Otherwise, return to the fast path.
2827  __ jmp(&got_smi_index_);
2828 
2829  // Call runtime. We get here when the receiver is a string and the
 2830  // index is a number, but the code for getting the actual character
2831  // is too complex (e.g., when the string needs to be flattened).
2832  __ bind(&call_runtime_);
2833  call_helper.BeforeCall(masm);
2834  __ SmiTag(index_);
2835  __ Push(object_, index_);
2836  __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
2837  __ Move(result_, r0);
2838  call_helper.AfterCall(masm);
2839  __ jmp(&exit_);
2840 
2841  __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
2842 }
2843 
2844 
2845 // -------------------------------------------------------------------------
2846 // StringCharFromCodeGenerator
2847 
2848 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
2849  // Fast case of Heap::LookupSingleCharacterStringFromCode.
2850  STATIC_ASSERT(kSmiTag == 0);
2853  __ tst(code_,
2854  Operand(kSmiTagMask |
2856  __ b(ne, &slow_case_);
2857 
2858  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
 2859  // At this point the code register contains a smi-tagged one-byte char code.
2860  __ add(result_, result_, Operand::PointerOffsetFromSmiKey(code_));
2862  __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
2863  __ b(eq, &slow_case_);
2864  __ bind(&exit_);
2865 }
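// Sketch of the fast path above: index the single-character string cache with
// the char code; a non-smi code, a code above the one-byte range, or an
// uncached entry (undefined) falls through to the slow case. Hypothetical
// types; the 0xff one-byte limit is assumed here.
struct HeapObj;
static HeapObj* FastCharFromCode(bool code_is_smi, int code,
                                 HeapObj* const* cache, HeapObj* undefined) {
  const unsigned kMaxOneByteCharCode = 0xff;  // assumed one-byte limit
  if (!code_is_smi || static_cast<unsigned>(code) > kMaxOneByteCharCode) {
    return nullptr;  // slow case
  }
  HeapObj* result = cache[code];
  return result == undefined ? nullptr : result;  // nullptr => slow case
}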
2866 
2867 
2869  MacroAssembler* masm,
2870  const RuntimeCallHelper& call_helper) {
2871  __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
2872 
2873  __ bind(&slow_case_);
2874  call_helper.BeforeCall(masm);
2875  __ push(code_);
2876  __ CallRuntime(Runtime::kCharFromCode, 1);
2877  __ Move(result_, r0);
2878  call_helper.AfterCall(masm);
2879  __ jmp(&exit_);
2880 
2881  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
2882 }
2883 
2884 
2885 enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
2886 
2887 
2888 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
2889  Register dest,
2890  Register src,
2891  Register count,
2892  Register scratch,
2893  String::Encoding encoding) {
2894  if (FLAG_debug_code) {
2895  // Check that destination is word aligned.
2896  __ tst(dest, Operand(kPointerAlignmentMask));
2897  __ Check(eq, kDestinationOfCopyNotAligned);
2898  }
2899 
2900  // Assumes word reads and writes are little endian.
2901  // Nothing to do for zero characters.
2902  Label done;
2903  if (encoding == String::TWO_BYTE_ENCODING) {
2904  __ add(count, count, Operand(count), SetCC);
2905  }
2906 
2907  Register limit = count; // Read until dest equals this.
2908  __ add(limit, dest, Operand(count));
2909 
2910  Label loop_entry, loop;
2911  // Copy bytes from src to dest until dest hits limit.
2912  __ b(&loop_entry);
2913  __ bind(&loop);
2914  __ ldrb(scratch, MemOperand(src, 1, PostIndex), lt);
2915  __ strb(scratch, MemOperand(dest, 1, PostIndex));
2916  __ bind(&loop_entry);
2917  __ cmp(dest, Operand(limit));
2918  __ b(lt, &loop);
2919 
2920  __ bind(&done);
2921 }
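// Byte-level equivalent of the copy loop above (sketch): for two-byte strings
// the character count is doubled up front, then bytes are copied until the
// destination pointer reaches its limit.
static void CopyCharactersSketch(unsigned char* dest, const unsigned char* src,
                                 int count, bool two_byte) {
  if (two_byte) count += count;              // count becomes a byte count
  unsigned char* limit = dest + count;       // read until dest equals this
  while (dest < limit) {
    *dest++ = *src++;
  }
}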
2922 
2923 
2924 void SubStringStub::Generate(MacroAssembler* masm) {
2925  Label runtime;
2926 
2927  // Stack frame on entry.
2928  // lr: return address
2929  // sp[0]: to
2930  // sp[4]: from
2931  // sp[8]: string
2932 
2933  // This stub is called from the native-call %_SubString(...), so
2934  // nothing can be assumed about the arguments. It is tested that:
2935  // "string" is a sequential string,
2936  // both "from" and "to" are smis, and
2937  // 0 <= from <= to <= string.length.
2938  // If any of these assumptions fail, we call the runtime system.
2939 
2940  const int kToOffset = 0 * kPointerSize;
2941  const int kFromOffset = 1 * kPointerSize;
2942  const int kStringOffset = 2 * kPointerSize;
2943 
2944  __ Ldrd(r2, r3, MemOperand(sp, kToOffset));
2945  STATIC_ASSERT(kFromOffset == kToOffset + 4);
2946  STATIC_ASSERT(kSmiTag == 0);
2948 
2949  // Arithmetic shift right by one un-smi-tags. In this case we rotate right
2950  // instead because we bail out on non-smi values: ROR and ASR are equivalent
2951  // for smis but they set the flags in a way that's easier to optimize.
2952  __ mov(r2, Operand(r2, ROR, 1), SetCC);
2953  __ mov(r3, Operand(r3, ROR, 1), SetCC, cc);
2954  // If either to or from had the smi tag bit set, then C is set now, and N
2955  // has the same value: we rotated by 1, so the bottom bit is now the top bit.
 2956  // We want to bail out to the runtime here if from is negative. In that case, the
2957  // next instruction is not executed and we fall through to bailing out to
2958  // runtime.
2959  // Executed if both r2 and r3 are untagged integers.
2960  __ sub(r2, r2, Operand(r3), SetCC, cc);
 2961  // One of the above untagging operations or the above SUB could have set N==1.
 2962  __ b(mi, &runtime); // Either "from" or "to" is not a smi, or from > to.
2963 
2964  // Make sure first argument is a string.
2965  __ ldr(r0, MemOperand(sp, kStringOffset));
2966  __ JumpIfSmi(r0, &runtime);
2967  Condition is_string = masm->IsObjectStringType(r0, r1);
2968  __ b(NegateCondition(is_string), &runtime);
2969 
2970  Label single_char;
2971  __ cmp(r2, Operand(1));
2972  __ b(eq, &single_char);
2973 
2974  // Short-cut for the case of trivial substring.
2975  Label return_r0;
2976  // r0: original string
2977  // r2: result string length
2979  __ cmp(r2, Operand(r4, ASR, 1));
2980  // Return original string.
2981  __ b(eq, &return_r0);
2982  // Longer than original string's length or negative: unsafe arguments.
2983  __ b(hi, &runtime);
2984  // Shorter than original string's length: an actual substring.
2985 
2986  // Deal with different string types: update the index if necessary
2987  // and put the underlying string into r5.
2988  // r0: original string
2989  // r1: instance type
2990  // r2: length
2991  // r3: from index (untagged)
2992  Label underlying_unpacked, sliced_string, seq_or_external_string;
2993  // If the string is not indirect, it can only be sequential or external.
2996  __ tst(r1, Operand(kIsIndirectStringMask));
2997  __ b(eq, &seq_or_external_string);
2998 
2999  __ tst(r1, Operand(kSlicedNotConsMask));
3000  __ b(ne, &sliced_string);
3001  // Cons string. Check whether it is flat, then fetch first part.
3003  __ CompareRoot(r5, Heap::kempty_stringRootIndex);
3004  __ b(ne, &runtime);
3006  // Update instance type.
3009  __ jmp(&underlying_unpacked);
3010 
3011  __ bind(&sliced_string);
3012  // Sliced string. Fetch parent and correct start index by offset.
3015  __ add(r3, r3, Operand(r4, ASR, 1)); // Add offset to index.
3016  // Update instance type.
3019  __ jmp(&underlying_unpacked);
3020 
3021  __ bind(&seq_or_external_string);
3022  // Sequential or external string. Just move string to the expected register.
3023  __ mov(r5, r0);
3024 
3025  __ bind(&underlying_unpacked);
3026 
3027  if (FLAG_string_slices) {
3028  Label copy_routine;
3029  // r5: underlying subject string
3030  // r1: instance type of underlying subject string
3031  // r2: length
3032  // r3: adjusted start index (untagged)
3033  __ cmp(r2, Operand(SlicedString::kMinLength));
3034  // Short slice. Copy instead of slicing.
3035  __ b(lt, &copy_routine);
3036  // Allocate new sliced string. At this point we do not reload the instance
3037  // type including the string encoding because we simply rely on the info
3038  // provided by the original string. It does not matter if the original
 3039  // string's encoding is wrong because we always have to recheck the encoding of
 3040  // the newly created string's parent anyway due to externalized strings.
3041  Label two_byte_slice, set_slice_header;
3044  __ tst(r1, Operand(kStringEncodingMask));
3045  __ b(eq, &two_byte_slice);
3046  __ AllocateOneByteSlicedString(r0, r2, r6, r4, &runtime);
3047  __ jmp(&set_slice_header);
3048  __ bind(&two_byte_slice);
3049  __ AllocateTwoByteSlicedString(r0, r2, r6, r4, &runtime);
3050  __ bind(&set_slice_header);
3051  __ mov(r3, Operand(r3, LSL, 1));
3054  __ jmp(&return_r0);
3055 
3056  __ bind(&copy_routine);
3057  }
3058 
3059  // r5: underlying subject string
3060  // r1: instance type of underlying subject string
3061  // r2: length
3062  // r3: adjusted start index (untagged)
3063  Label two_byte_sequential, sequential_string, allocate_result;
3066  __ tst(r1, Operand(kExternalStringTag));
3067  __ b(eq, &sequential_string);
3068 
3069  // Handle external string.
3070  // Rule out short external strings.
3072  __ tst(r1, Operand(kShortExternalStringTag));
3073  __ b(ne, &runtime);
3075  // r5 already points to the first character of underlying string.
3076  __ jmp(&allocate_result);
3077 
3078  __ bind(&sequential_string);
3079  // Locate first character of underlying subject string.
3082 
3083  __ bind(&allocate_result);
 3084  // Sequential one-byte string. Allocate the result.
3086  __ tst(r1, Operand(kStringEncodingMask));
3087  __ b(eq, &two_byte_sequential);
3088 
3089  // Allocate and copy the resulting one-byte string.
3090  __ AllocateOneByteString(r0, r2, r4, r6, r1, &runtime);
3091 
3092  // Locate first character of substring to copy.
3093  __ add(r5, r5, r3);
3094  // Locate first character of result.
3096 
3097  // r0: result string
3098  // r1: first character of result string
3099  // r2: result string length
3100  // r5: first character of substring to copy
3103  masm, r1, r5, r2, r3, String::ONE_BYTE_ENCODING);
3104  __ jmp(&return_r0);
3105 
3106  // Allocate and copy the resulting two-byte string.
3107  __ bind(&two_byte_sequential);
3108  __ AllocateTwoByteString(r0, r2, r4, r6, r1, &runtime);
3109 
3110  // Locate first character of substring to copy.
3111  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
3112  __ add(r5, r5, Operand(r3, LSL, 1));
3113  // Locate first character of result.
3115 
3116  // r0: result string.
3117  // r1: first character of result.
3118  // r2: result length.
3119  // r5: first character of substring to copy.
3122  masm, r1, r5, r2, r3, String::TWO_BYTE_ENCODING);
3123 
3124  __ bind(&return_r0);
3125  Counters* counters = isolate()->counters();
3126  __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
3127  __ Drop(3);
3128  __ Ret();
3129 
 3130  // Just jump to the runtime to create the substring.
3131  __ bind(&runtime);
3132  __ TailCallRuntime(Runtime::kSubString, 3, 1);
3133 
3134  __ bind(&single_char);
3135  // r0: original string
3136  // r1: instance type
3137  // r2: length
3138  // r3: from index (untagged)
3139  __ SmiTag(r3, r3);
3140  StringCharAtGenerator generator(
3141  r0, r3, r2, r0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
3142  generator.GenerateFast(masm);
3143  __ Drop(3);
3144  __ Ret();
3145  generator.SkipSlow(masm, &runtime);
3146 }
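// The argument checks above reduce to this validation (sketch; smi untagging,
// the single-character shortcut and the string-type dispatch are omitted):
static bool SubStringArgsAreSafe(int from, int to, int string_length) {
  int result_length = to - from;
  if (from < 0 || result_length < 0) return false;  // bail to the runtime
  return result_length <= string_length;            // longer would be unsafe
}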
3147 
3148 
3150  MacroAssembler* masm, Register left, Register right, Register scratch1,
3151  Register scratch2, Register scratch3) {
3152  Register length = scratch1;
3153 
3154  // Compare lengths.
3155  Label strings_not_equal, check_zero_length;
3156  __ ldr(length, FieldMemOperand(left, String::kLengthOffset));
3157  __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
3158  __ cmp(length, scratch2);
3159  __ b(eq, &check_zero_length);
3160  __ bind(&strings_not_equal);
3161  __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL)));
3162  __ Ret();
3163 
3164  // Check if the length is zero.
3165  Label compare_chars;
3166  __ bind(&check_zero_length);
3167  STATIC_ASSERT(kSmiTag == 0);
3168  __ cmp(length, Operand::Zero());
3169  __ b(ne, &compare_chars);
3170  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
3171  __ Ret();
3172 
3173  // Compare characters.
3174  __ bind(&compare_chars);
3175  GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
3176  &strings_not_equal);
3177 
3178  // Characters are equal.
3179  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
3180  __ Ret();
3181 }
3182 
3183 
3185  MacroAssembler* masm, Register left, Register right, Register scratch1,
3186  Register scratch2, Register scratch3, Register scratch4) {
3187  Label result_not_equal, compare_lengths;
3188  // Find minimum length and length difference.
3189  __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
3190  __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
3191  __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
3192  Register length_delta = scratch3;
3193  __ mov(scratch1, scratch2, LeaveCC, gt);
3194  Register min_length = scratch1;
3195  STATIC_ASSERT(kSmiTag == 0);
3196  __ cmp(min_length, Operand::Zero());
3197  __ b(eq, &compare_lengths);
3198 
3199  // Compare loop.
3200  GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
3201  scratch4, &result_not_equal);
3202 
3203  // Compare lengths - strings up to min-length are equal.
3204  __ bind(&compare_lengths);
3205  DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
3206  // Use length_delta as result if it's zero.
3207  __ mov(r0, Operand(length_delta), SetCC);
3208  __ bind(&result_not_equal);
3209  // Conditionally update the result based either on length_delta or
 3210  // the last comparison performed in the loop above.
3211  __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
3212  __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
3213  __ Ret();
3214 }
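// Sketch of the ordering computed above: compare up to min(length) characters,
// then fall back to the length difference. The real code returns the smis
// LESS, EQUAL and GREATER; plain -1/0/1 stand in here.
static int CompareFlatOneByteStringsSketch(const unsigned char* left,
                                           int left_length,
                                           const unsigned char* right,
                                           int right_length) {
  int min_length = left_length < right_length ? left_length : right_length;
  for (int i = 0; i < min_length; i++) {
    if (left[i] != right[i]) return left[i] < right[i] ? -1 : 1;
  }
  int length_delta = left_length - right_length;
  if (length_delta == 0) return 0;
  return length_delta < 0 ? -1 : 1;
}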
3215 
3216 
3218  MacroAssembler* masm, Register left, Register right, Register length,
3219  Register scratch1, Register scratch2, Label* chars_not_equal) {
3220  // Change index to run from -length to -1 by adding length to string
 3221  // start. This means that the loop ends when the index reaches zero, which
3222  // doesn't need an additional compare.
3223  __ SmiUntag(length);
3224  __ add(scratch1, length,
3226  __ add(left, left, Operand(scratch1));
3227  __ add(right, right, Operand(scratch1));
3228  __ rsb(length, length, Operand::Zero());
3229  Register index = length; // index = -length;
3230 
3231  // Compare loop.
3232  Label loop;
3233  __ bind(&loop);
3234  __ ldrb(scratch1, MemOperand(left, index));
3235  __ ldrb(scratch2, MemOperand(right, index));
3236  __ cmp(scratch1, scratch2);
3237  __ b(ne, chars_not_equal);
3238  __ add(index, index, Operand(1), SetCC);
3239  __ b(ne, &loop);
3240 }
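// The negative-index trick used above, in plain C++: advance both pointers by
// length and run the index from -length towards zero, so reaching zero ends
// the loop without an extra compare. (Sketch with hypothetical parameters.)
static bool OneByteCharsEqualSketch(const unsigned char* left,
                                    const unsigned char* right, int length) {
  left += length;
  right += length;
  for (int index = -length; index != 0; index++) {
    if (left[index] != right[index]) return false;
  }
  return true;
}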
3241 
3242 
3243 void StringCompareStub::Generate(MacroAssembler* masm) {
3244  Label runtime;
3245 
3246  Counters* counters = isolate()->counters();
3247 
3248  // Stack frame on entry.
3249  // sp[0]: right string
3250  // sp[4]: left string
 3251  __ Ldrd(r0, r1, MemOperand(sp)); // Load right in r0, left in r1.
3252 
3253  Label not_same;
3254  __ cmp(r0, r1);
3255  __ b(ne, &not_same);
3256  STATIC_ASSERT(EQUAL == 0);
3257  STATIC_ASSERT(kSmiTag == 0);
3258  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
3259  __ IncrementCounter(counters->string_compare_native(), 1, r1, r2);
3260  __ add(sp, sp, Operand(2 * kPointerSize));
3261  __ Ret();
3262 
3263  __ bind(&not_same);
3264 
3265  // Check that both objects are sequential one-byte strings.
3266  __ JumpIfNotBothSequentialOneByteStrings(r1, r0, r2, r3, &runtime);
3267 
3268  // Compare flat one-byte strings natively. Remove arguments from stack first.
3269  __ IncrementCounter(counters->string_compare_native(), 1, r2, r3);
3270  __ add(sp, sp, Operand(2 * kPointerSize));
3271  StringHelper::GenerateCompareFlatOneByteStrings(masm, r1, r0, r2, r3, r4, r5);
3272 
3273  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
3274  // tagged as a small integer.
3275  __ bind(&runtime);
3276  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3277 }
3278 
3279 
3280 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
3281  // ----------- S t a t e -------------
3282  // -- r1 : left
3283  // -- r0 : right
3284  // -- lr : return address
3285  // -----------------------------------
3286 
3287  // Load r2 with the allocation site. We stick an undefined dummy value here
3288  // and replace it with the real allocation site later when we instantiate this
3289  // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
3290  __ Move(r2, handle(isolate()->heap()->undefined_value()));
3291 
3292  // Make sure that we actually patched the allocation site.
3293  if (FLAG_debug_code) {
3294  __ tst(r2, Operand(kSmiTagMask));
3295  __ Assert(ne, kExpectedAllocationSite);
3296  __ push(r2);
3297  __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
3298  __ LoadRoot(ip, Heap::kAllocationSiteMapRootIndex);
3299  __ cmp(r2, ip);
3300  __ pop(r2);
3301  __ Assert(eq, kExpectedAllocationSite);
3302  }
3303 
3304  // Tail call into the stub that handles binary operations with allocation
3305  // sites.
3306  BinaryOpWithAllocationSiteStub stub(isolate(), state());
3307  __ TailCallStub(&stub);
3308 }
3309 
3310 
3311 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
3312  DCHECK(state() == CompareICState::SMI);
3313  Label miss;
3314  __ orr(r2, r1, r0);
3315  __ JumpIfNotSmi(r2, &miss);
3316 
3317  if (GetCondition() == eq) {
3318  // For equality we do not care about the sign of the result.
3319  __ sub(r0, r0, r1, SetCC);
3320  } else {
3321  // Untag before subtracting to avoid handling overflow.
3322  __ SmiUntag(r1);
3323  __ sub(r0, r1, Operand::SmiUntag(r0));
3324  }
3325  __ Ret();
3326 
3327  __ bind(&miss);
3328  GenerateMiss(masm);
3329 }
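GenerateSmis relies on the 32-bit smi encoding (tag bit 0 in the low bit, payload shifted left by one). A hedged C++ sketch of the same arithmetic, outside the stub:

// For eq/ne only "zero vs. non-zero" matters, so tagged values can be
// subtracted directly. For ordered compares both operands are untagged
// first; 31-bit payloads cannot overflow a 32-bit subtraction, so the sign
// of the difference is the answer.
static int32_t CompareSmis(int32_t left_tagged, int32_t right_tagged,
                           bool equality_only) {
  if (equality_only) {
    return right_tagged - left_tagged;
  }
  return (left_tagged >> 1) - (right_tagged >> 1);  // SmiUntag, then subtract
}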
3330 
3331 
3332 void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
3333  DCHECK(state() == CompareICState::NUMBER);
3334 
3335  Label generic_stub;
3336  Label unordered, maybe_undefined1, maybe_undefined2;
3337  Label miss;
3338 
3339  if (left() == CompareICState::SMI) {
3340  __ JumpIfNotSmi(r1, &miss);
3341  }
3342  if (right() == CompareICState::SMI) {
3343  __ JumpIfNotSmi(r0, &miss);
3344  }
3345 
3346  // Inlining the double comparison and falling back to the general compare
3347  // stub if NaN is involved.
3348  // Load left and right operand.
3349  Label done, left, left_smi, right_smi;
3350  __ JumpIfSmi(r0, &right_smi);
3351  __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
3352  DONT_DO_SMI_CHECK);
3353  __ sub(r2, r0, Operand(kHeapObjectTag));
3354  __ vldr(d1, r2, HeapNumber::kValueOffset);
3355  __ b(&left);
3356  __ bind(&right_smi);
3357  __ SmiToDouble(d1, r0);
3358 
3359  __ bind(&left);
3360  __ JumpIfSmi(r1, &left_smi);
3361  __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
3362  DONT_DO_SMI_CHECK);
3363  __ sub(r2, r1, Operand(kHeapObjectTag));
3364  __ vldr(d0, r2, HeapNumber::kValueOffset);
3365  __ b(&done);
3366  __ bind(&left_smi);
3367  __ SmiToDouble(d0, r1);
3368 
3369  __ bind(&done);
3370  // Compare operands.
3371  __ VFPCompareAndSetFlags(d0, d1);
3372 
3373  // Don't base result on status bits when a NaN is involved.
3374  __ b(vs, &unordered);
3375 
3376  // Return a result of -1, 0, or 1, based on status bits.
3377  __ mov(r0, Operand(EQUAL), LeaveCC, eq);
3378  __ mov(r0, Operand(LESS), LeaveCC, lt);
3379  __ mov(r0, Operand(GREATER), LeaveCC, gt);
3380  __ Ret();
3381 
3382  __ bind(&unordered);
3383  __ bind(&generic_stub);
3384  CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
3385  CompareICState::GENERIC, CompareICState::GENERIC);
3386  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
3387 
3388  __ bind(&maybe_undefined1);
3389  if (Token::IsOrderedRelationalCompareOp(op())) {
3390  __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
3391  __ b(ne, &miss);
3392  __ JumpIfSmi(r1, &unordered);
3393  __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
3394  __ b(ne, &maybe_undefined2);
3395  __ jmp(&unordered);
3396  }
3397 
3398  __ bind(&maybe_undefined2);
3399  if (Token::IsOrderedRelationalCompareOp(op())) {
3400  __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
3401  __ b(eq, &unordered);
3402  }
3403 
3404  __ bind(&miss);
3405  GenerateMiss(masm);
3406 }
3407 
3408 
3409 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
3410  DCHECK(state() == CompareICState::INTERNALIZED_STRING);
3411  Label miss;
3412 
3413  // Registers containing left and right operands respectively.
3414  Register left = r1;
3415  Register right = r0;
3416  Register tmp1 = r2;
3417  Register tmp2 = r3;
3418 
3419  // Check that both operands are heap objects.
3420  __ JumpIfEitherSmi(left, right, &miss);
3421 
3422  // Check that both operands are internalized strings.
3423  __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3424  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3425  __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3426  __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3427  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3428  __ orr(tmp1, tmp1, Operand(tmp2));
3429  __ tst(tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3430  __ b(ne, &miss);
3431 
3432  // Internalized strings are compared by identity.
3433  __ cmp(left, right);
3434  // Make sure r0 is non-zero. At this point input operands are
3435  // guaranteed to be non-zero.
3436  DCHECK(right.is(r0));
3437  STATIC_ASSERT(EQUAL == 0);
3438  STATIC_ASSERT(kSmiTag == 0);
3439  __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
3440  __ Ret();
3441 
3442  __ bind(&miss);
3443  GenerateMiss(masm);
3444 }
3445 
3446 
3447 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
3448  DCHECK(state() == CompareICState::UNIQUE_NAME);
3449  DCHECK(GetCondition() == eq);
3450  Label miss;
3451 
3452  // Registers containing left and right operands respectively.
3453  Register left = r1;
3454  Register right = r0;
3455  Register tmp1 = r2;
3456  Register tmp2 = r3;
3457 
3458  // Check that both operands are heap objects.
3459  __ JumpIfEitherSmi(left, right, &miss);
3460 
3461  // Check that both operands are unique names. This leaves the instance
3462  // types loaded in tmp1 and tmp2.
3463  __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3464  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3465  __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3466  __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3467 
3468  __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
3469  __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
3470 
3471  // Unique names are compared by identity.
3472  __ cmp(left, right);
3473  // Make sure r0 is non-zero. At this point input operands are
3474  // guaranteed to be non-zero.
3475  DCHECK(right.is(r0));
3476  STATIC_ASSERT(EQUAL == 0);
3477  STATIC_ASSERT(kSmiTag == 0);
3478  __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
3479  __ Ret();
3480 
3481  __ bind(&miss);
3482  GenerateMiss(masm);
3483 }
3484 
3485 
3486 void CompareICStub::GenerateStrings(MacroAssembler* masm) {
3487  DCHECK(state() == CompareICState::STRING);
3488  Label miss;
3489 
3490  bool equality = Token::IsEqualityOp(op());
3491 
3492  // Registers containing left and right operands respectively.
3493  Register left = r1;
3494  Register right = r0;
3495  Register tmp1 = r2;
3496  Register tmp2 = r3;
3497  Register tmp3 = r4;
3498  Register tmp4 = r5;
3499 
3500  // Check that both operands are heap objects.
3501  __ JumpIfEitherSmi(left, right, &miss);
3502 
3503  // Check that both operands are strings. This leaves the instance
3504  // types loaded in tmp1 and tmp2.
3505  __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3506  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3507  __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3508  __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3509  STATIC_ASSERT(kNotStringTag != 0);
3510  __ orr(tmp3, tmp1, tmp2);
3511  __ tst(tmp3, Operand(kIsNotStringMask));
3512  __ b(ne, &miss);
3513 
3514  // Fast check for identical strings.
3515  __ cmp(left, right);
3516  STATIC_ASSERT(EQUAL == 0);
3517  STATIC_ASSERT(kSmiTag == 0);
3518  __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
3519  __ Ret(eq);
3520 
3521  // Handle not identical strings.
3522 
3523  // Check that both strings are internalized strings. If they are, we're done
3524  // because we already know they are not identical. We know they are both
3525  // strings.
3526  if (equality) {
3527  DCHECK(GetCondition() == eq);
3528  STATIC_ASSERT(kInternalizedTag == 0);
3529  __ orr(tmp3, tmp1, Operand(tmp2));
3530  __ tst(tmp3, Operand(kIsNotInternalizedMask));
3531  // Make sure r0 is non-zero. At this point input operands are
3532  // guaranteed to be non-zero.
3533  DCHECK(right.is(r0));
3534  __ Ret(eq);
3535  }
3536 
3537  // Check that both strings are sequential one-byte.
3538  Label runtime;
3539  __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
3540  &runtime);
3541 
3542  // Compare flat one-byte strings. Returns when done.
3543  if (equality) {
3544  StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
3545  tmp3);
3546  } else {
3547  StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
3548  tmp2, tmp3, tmp4);
3549  }
3550 
3551  // Handle more complex cases in runtime.
3552  __ bind(&runtime);
3553  __ Push(left, right);
3554  if (equality) {
3555  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
3556  } else {
3557  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3558  }
3559 
3560  __ bind(&miss);
3561  GenerateMiss(masm);
3562 }
3563 
3564 
3565 void CompareICStub::GenerateObjects(MacroAssembler* masm) {
3566  DCHECK(state() == CompareICState::OBJECT);
3567  Label miss;
3568  __ and_(r2, r1, Operand(r0));
3569  __ JumpIfSmi(r2, &miss);
3570 
3571  __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
3572  __ b(ne, &miss);
3573  __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE);
3574  __ b(ne, &miss);
3575 
3576  DCHECK(GetCondition() == eq);
3577  __ sub(r0, r0, Operand(r1));
3578  __ Ret();
3579 
3580  __ bind(&miss);
3581  GenerateMiss(masm);
3582 }
3583 
3584 
3585 void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
3586  Label miss;
3587  __ and_(r2, r1, Operand(r0));
3588  __ JumpIfSmi(r2, &miss);
3589  __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
3590  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
3591  __ cmp(r2, Operand(known_map_));
3592  __ b(ne, &miss);
3593  __ cmp(r3, Operand(known_map_));
3594  __ b(ne, &miss);
3595 
3596  __ sub(r0, r0, Operand(r1));
3597  __ Ret();
3598 
3599  __ bind(&miss);
3600  GenerateMiss(masm);
3601 }
3602 
3603 
3604 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
3605  {
3606  // Call the runtime system in a fresh internal frame.
3607  ExternalReference miss =
3608  ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
3609 
3610  FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
3611  __ Push(r1, r0);
3612  __ Push(lr, r1, r0);
3613  __ mov(ip, Operand(Smi::FromInt(op())));
3614  __ push(ip);
3615  __ CallExternalReference(miss, 3);
3616  // Compute the entry point of the rewritten stub.
3617  __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
3618  // Restore registers.
3619  __ pop(lr);
3620  __ Pop(r1, r0);
3621  }
3622 
3623  __ Jump(r2);
3624 }
3625 
3626 
3627 void DirectCEntryStub::Generate(MacroAssembler* masm) {
3628  // Place the return address on the stack, making the call
3629  // GC safe. The RegExp backend also relies on this.
3630  __ str(lr, MemOperand(sp, 0));
3631  __ blx(ip); // Call the C++ function.
3632  __ VFPEnsureFPSCRState(r2);
3633  __ ldr(pc, MemOperand(sp, 0));
3634 }
3635 
3636 
3637 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
3638  Register target) {
3639  intptr_t code =
3640  reinterpret_cast<intptr_t>(GetCode().location());
3641  __ Move(ip, target);
3642  __ mov(lr, Operand(code, RelocInfo::CODE_TARGET));
3643  __ blx(lr); // Call the stub.
3644 }
3645 
3646 
3647 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
3648  Label* miss,
3649  Label* done,
3650  Register receiver,
3651  Register properties,
3652  Handle<Name> name,
3653  Register scratch0) {
3654  DCHECK(name->IsUniqueName());
3655  // If names of slots in range from 1 to kProbes - 1 for the hash value are
3656  // not equal to the name and kProbes-th slot is not used (its name is the
3657  // undefined value), it guarantees the hash table doesn't contain the
3658  // property. It's true even if some slots represent deleted properties
3659  // (their names are the hole value).
3660  for (int i = 0; i < kInlinedProbes; i++) {
3661  // scratch0 points to properties hash.
3662  // Compute the masked index: (hash + i + i * i) & mask.
3663  Register index = scratch0;
3664  // Capacity is smi 2^n.
3665  __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
3666  __ sub(index, index, Operand(1));
3667  __ and_(index, index, Operand(
3668  Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
3669 
3670  // Scale the index by multiplying by the entry size.
3671  DCHECK(NameDictionary::kEntrySize == 3);
3672  __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
3673 
3674  Register entity_name = scratch0;
3675  // Having undefined at this place means the name is not contained.
3676  DCHECK_EQ(kSmiTagSize, 1);
3677  Register tmp = properties;
3678  __ add(tmp, properties, Operand(index, LSL, 1));
3679  __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
3680 
3681  DCHECK(!tmp.is(entity_name));
3682  __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
3683  __ cmp(entity_name, tmp);
3684  __ b(eq, done);
3685 
3686  // Load the hole ready for use below:
3687  __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
3688 
3689  // Stop if found the property.
3690  __ cmp(entity_name, Operand(Handle<Name>(name)));
3691  __ b(eq, miss);
3692 
3693  Label good;
3694  __ cmp(entity_name, tmp);
3695  __ b(eq, &good);
3696 
3697  // Check if the entry name is not a unique name.
3698  __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
3699  __ ldrb(entity_name,
3700  FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
3701  __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
3702  __ bind(&good);
3703 
3704  // Restore the properties.
3705  __ ldr(properties,
3706  FieldMemOperand(receiver, JSObject::kPropertiesOffset));
3707  }
3708 
3709  const int spill_mask =
3710  (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() |
3711  r2.bit() | r1.bit() | r0.bit());
3712 
3713  __ stm(db_w, sp, spill_mask);
3714  __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
3715  __ mov(r1, Operand(Handle<Name>(name)));
3716  NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
3717  __ CallStub(&stub);
3718  __ cmp(r0, Operand::Zero());
3719  __ ldm(ia_w, sp, spill_mask);
3720 
3721  __ b(eq, done);
3722  __ b(ne, miss);
3723 }
3724 
3725 
3726 // Probe the name dictionary in the |elements| register. Jump to the
3727 // |done| label if a property with the given name is found. Jump to
3728 // the |miss| label otherwise.
3729 // If lookup was successful |scratch2| will be equal to elements + 4 * index.
3730 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
3731  Label* miss,
3732  Label* done,
3733  Register elements,
3734  Register name,
3735  Register scratch1,
3736  Register scratch2) {
3737  DCHECK(!elements.is(scratch1));
3738  DCHECK(!elements.is(scratch2));
3739  DCHECK(!name.is(scratch1));
3740  DCHECK(!name.is(scratch2));
3741 
3742  __ AssertName(name);
3743 
3744  // Compute the capacity mask.
3745  __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
3746  __ SmiUntag(scratch1);
3747  __ sub(scratch1, scratch1, Operand(1));
3748 
3749  // Generate an unrolled loop that performs a few probes before
3750  // giving up. Measurements done on Gmail indicate that 2 probes
3751  // cover ~93% of loads from dictionaries.
3752  for (int i = 0; i < kInlinedProbes; i++) {
3753  // Compute the masked index: (hash + i + i * i) & mask.
3754  __ ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
3755  if (i > 0) {
3756  // Add the probe offset (i + i * i) left shifted to avoid right shifting
3757  // the hash in a separate instruction. The value hash + i + i * i is right
3758  // shifted in the following and instruction.
3759  DCHECK(NameDictionary::GetProbeOffset(i) <
3760  1 << (32 - Name::kHashFieldOffset));
3761  __ add(scratch2, scratch2, Operand(
3762  NameDictionary::GetProbeOffset(i) << Name::kHashShift));
3763  }
3764  __ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
3765 
3766  // Scale the index by multiplying by the element size.
3767  DCHECK(NameDictionary::kEntrySize == 3);
3768  // scratch2 = scratch2 * 3.
3769  __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
3770 
3771  // Check if the key is identical to the name.
3772  __ add(scratch2, elements, Operand(scratch2, LSL, 2));
3773  __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
3774  __ cmp(name, Operand(ip));
3775  __ b(eq, done);
3776  }
3777 
3778  const int spill_mask =
3779  (lr.bit() | r6.bit() | r5.bit() | r4.bit() |
3780  r3.bit() | r2.bit() | r1.bit() | r0.bit()) &
3781  ~(scratch1.bit() | scratch2.bit());
3782 
3783  __ stm(db_w, sp, spill_mask);
3784  if (name.is(r0)) {
3785  DCHECK(!elements.is(r1));
3786  __ Move(r1, name);
3787  __ Move(r0, elements);
3788  } else {
3789  __ Move(r0, elements);
3790  __ Move(r1, name);
3791  }
3792  NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
3793  __ CallStub(&stub);
3794  __ cmp(r0, Operand::Zero());
3795  __ mov(scratch2, Operand(r2));
3796  __ ldm(ia_w, sp, spill_mask);
3797 
3798  __ b(ne, done);
3799  __ b(eq, miss);
3800 }
3801 
3802 
3803 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
3804  // This stub overrides SometimesSetsUpAFrame() to return false. That means
3805  // we cannot call anything that could cause a GC from this stub.
3806  // Registers:
3807  // result: NameDictionary to probe
3808  // r1: key
3809  // dictionary: NameDictionary to probe.
3810  // index: will hold an index of entry if lookup is successful.
3811  // might alias with result_.
3812  // Returns:
3813  // result_ is zero if lookup failed, non-zero otherwise.
3814 
3815  Register result = r0;
3816  Register dictionary = r0;
3817  Register key = r1;
3818  Register index = r2;
3819  Register mask = r3;
3820  Register hash = r4;
3821  Register undefined = r5;
3822  Register entry_key = r6;
3823 
3824  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
3825 
3826  __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset));
3827  __ SmiUntag(mask);
3828  __ sub(mask, mask, Operand(1));
3829 
3830  __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
3831 
3832  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
3833 
3834  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
3835  // Compute the masked index: (hash + i + i * i) & mask.
3836  // Capacity is smi 2^n.
3837  if (i > 0) {
3838  // Add the probe offset (i + i * i) left shifted to avoid right shifting
3839  // the hash in a separate instruction. The value hash + i + i * i is right
3840  // shifted in the following and instruction.
3841  DCHECK(NameDictionary::GetProbeOffset(i) <
3842  1 << (32 - Name::kHashFieldOffset));
3843  __ add(index, hash, Operand(
3844  NameDictionary::GetProbeOffset(i) << Name::kHashShift));
3845  } else {
3846  __ mov(index, Operand(hash));
3847  }
3848  __ and_(index, mask, Operand(index, LSR, Name::kHashShift));
3849 
3850  // Scale the index by multiplying by the entry size.
3851  DCHECK(NameDictionary::kEntrySize == 3);
3852  __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
3853 
3854  DCHECK_EQ(kSmiTagSize, 1);
3855  __ add(index, dictionary, Operand(index, LSL, 2));
3856  __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
3857 
3858  // Having undefined at this place means the name is not contained.
3859  __ cmp(entry_key, Operand(undefined));
3860  __ b(eq, &not_in_dictionary);
3861 
3862  // Stop if found the property.
3863  __ cmp(entry_key, Operand(key));
3864  __ b(eq, &in_dictionary);
3865 
3866  if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
3867  // Check if the entry name is not a unique name.
3868  __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
3869  __ ldrb(entry_key,
3870  FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
3871  __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
3872  }
3873  }
3874 
3875  __ bind(&maybe_in_dictionary);
3876  // If we are doing negative lookup then probing failure should be
3877  // treated as a lookup success. For positive lookup probing failure
3878  // should be treated as lookup failure.
3879  if (mode() == POSITIVE_LOOKUP) {
3880  __ mov(result, Operand::Zero());
3881  __ Ret();
3882  }
3883 
3884  __ bind(&in_dictionary);
3885  __ mov(result, Operand(1));
3886  __ Ret();
3887 
3888  __ bind(&not_in_dictionary);
3889  __ mov(result, Operand::Zero());
3890  __ Ret();
3891 }
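The lookup above probes the dictionary exactly as the comments describe: capacity is a power of two, so (hash + i + i*i) & (capacity - 1) selects the slot, and each entry occupies three words. A simplified C++ sketch of that probing loop (layout and names are hypothetical, for illustration only):

static int FindEntry(const uint32_t* keys, uint32_t capacity,  // capacity = 2^n
                     uint32_t hash, uint32_t key,
                     uint32_t undefined_sentinel, int total_probes) {
  uint32_t mask = capacity - 1;
  for (int i = 0; i < total_probes; i++) {
    uint32_t index = (hash + i + i * i) & mask;         // masked probe index
    uint32_t entry = keys[index * 3];                   // kEntrySize == 3
    if (entry == undefined_sentinel) return -1;         // not_in_dictionary
    if (entry == key) return static_cast<int>(index);   // in_dictionary
  }
  return -1;  // maybe_in_dictionary: give up after the last probe
}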
3892 
3893 
3894 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
3895  Isolate* isolate) {
3896  StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
3897  stub1.GetCode();
3898  // Hydrogen code stubs need stub2 at snapshot time.
3899  StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
3900  stub2.GetCode();
3901 }
3902 
3903 
3904 // Takes the input in 3 registers: address_ value_ and object_. A pointer to
3905 // the value has just been written into the object, now this stub makes sure
3906 // we keep the GC informed. The word in the object where the value has been
3907 // written is in the address register.
3908 void RecordWriteStub::Generate(MacroAssembler* masm) {
3909  Label skip_to_incremental_noncompacting;
3910  Label skip_to_incremental_compacting;
3911 
3912  // The first two instructions are generated with labels so as to get the
3913  // offset fixed up correctly by the bind(Label*) call. We patch it back and
3914  // forth between a compare instructions (a nop in this position) and the
3915  // real branch when we start and stop incremental heap marking.
3916  // See RecordWriteStub::Patch for details.
3917  {
3918  // Block literal pool emission, as the position of these two instructions
3919  // is assumed by the patching code.
3920  Assembler::BlockConstPoolScope block_const_pool(masm);
3921  __ b(&skip_to_incremental_noncompacting);
3922  __ b(&skip_to_incremental_compacting);
3923  }
3924 
3925  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
3926  __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
3927  MacroAssembler::kReturnAtEnd);
3928  }
3929  __ Ret();
3930 
3931  __ bind(&skip_to_incremental_noncompacting);
3932  GenerateIncremental(masm, INCREMENTAL);
3933 
3934  __ bind(&skip_to_incremental_compacting);
3935  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
3936 
3937  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
3938  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
3939  DCHECK(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
3940  DCHECK(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
3941  PatchBranchIntoNop(masm, 0);
3942  PatchBranchIntoNop(masm, Assembler::kInstrSize);
3943 }
3944 
3945 
3946 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
3947  regs_.Save(masm);
3948 
3949  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
3950  Label dont_need_remembered_set;
3951 
3952  __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
3953  __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
3954  regs_.scratch0(),
3955  &dont_need_remembered_set);
3956 
3957  __ CheckPageFlag(regs_.object(),
3958  regs_.scratch0(),
3959  1 << MemoryChunk::SCAN_ON_SCAVENGE,
3960  ne,
3961  &dont_need_remembered_set);
3962 
3963  // First notify the incremental marker if necessary, then update the
3964  // remembered set.
3965  CheckNeedsToInformIncrementalMarker(
3966  masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
3967  InformIncrementalMarker(masm);
3968  regs_.Restore(masm);
3969  __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
3970  MacroAssembler::kReturnAtEnd);
3971 
3972  __ bind(&dont_need_remembered_set);
3973  }
3974 
3975  CheckNeedsToInformIncrementalMarker(
3976  masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
3977  InformIncrementalMarker(masm);
3978  regs_.Restore(masm);
3979  __ Ret();
3980 }
3981 
3982 
3983 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
3984  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
3985  int argument_count = 3;
3986  __ PrepareCallCFunction(argument_count, regs_.scratch0());
3987  Register address =
3988  r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
3989  DCHECK(!address.is(regs_.object()));
3990  DCHECK(!address.is(r0));
3991  __ Move(address, regs_.address());
3992  __ Move(r0, regs_.object());
3993  __ Move(r1, address);
3994  __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
3995 
3996  AllowExternalCallThatCantCauseGC scope(masm);
3997  __ CallCFunction(
3998  ExternalReference::incremental_marking_record_write_function(isolate()),
3999  argument_count);
4000  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
4001 }
4002 
4003 
4004 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
4005  MacroAssembler* masm,
4006  OnNoNeedToInformIncrementalMarker on_no_need,
4007  Mode mode) {
4008  Label on_black;
4009  Label need_incremental;
4010  Label need_incremental_pop_scratch;
4011 
4012  __ and_(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
4013  __ ldr(regs_.scratch1(),
4014  MemOperand(regs_.scratch0(),
4015  MemoryChunk::kWriteBarrierCounterOffset));
4016  __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1), SetCC);
4017  __ str(regs_.scratch1(),
4018  MemOperand(regs_.scratch0(),
4019  MemoryChunk::kWriteBarrierCounterOffset));
4020  __ b(mi, &need_incremental);
4021 
4022  // Let's look at the color of the object: If it is not black we don't have
4023  // to inform the incremental marker.
4024  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
4025 
4026  regs_.Restore(masm);
4027  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4028  __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
4029  MacroAssembler::kReturnAtEnd);
4030  } else {
4031  __ Ret();
4032  }
4033 
4034  __ bind(&on_black);
4035 
4036  // Get the value from the slot.
4037  __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
4038 
4039  if (mode == INCREMENTAL_COMPACTION) {
4040  Label ensure_not_white;
4041 
4042  __ CheckPageFlag(regs_.scratch0(), // Contains value.
4043  regs_.scratch1(), // Scratch.
4044  MemoryChunk::kEvacuationCandidateMask,
4045  eq,
4046  &ensure_not_white);
4047 
4048  __ CheckPageFlag(regs_.object(),
4049  regs_.scratch1(), // Scratch.
4050  MemoryChunk::kSkipEvacuationSlotsRecordingMask,
4051  eq,
4052  &need_incremental);
4053 
4054  __ bind(&ensure_not_white);
4055  }
4056 
4057  // We need extra registers for this, so we push the object and the address
4058  // register temporarily.
4059  __ Push(regs_.object(), regs_.address());
4060  __ EnsureNotWhite(regs_.scratch0(), // The value.
4061  regs_.scratch1(), // Scratch.
4062  regs_.object(), // Scratch.
4063  regs_.address(), // Scratch.
4064  &need_incremental_pop_scratch);
4065  __ Pop(regs_.object(), regs_.address());
4066 
4067  regs_.Restore(masm);
4068  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4069  __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
4070  MacroAssembler::kReturnAtEnd);
4071  } else {
4072  __ Ret();
4073  }
4074 
4075  __ bind(&need_incremental_pop_scratch);
4076  __ Pop(regs_.object(), regs_.address());
4077 
4078  __ bind(&need_incremental);
4079 
4080  // Fall through when we need to inform the incremental marker.
4081 }
4082 
4083 
4084 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
4085  // ----------- S t a t e -------------
4086  // -- r0 : element value to store
4087  // -- r3 : element index as smi
4088  // -- sp[0] : array literal index in function as smi
4089  // -- sp[4] : array literal
4090  // clobbers r1, r2, r4
4091  // -----------------------------------
4092 
4093  Label element_done;
4094  Label double_elements;
4095  Label smi_element;
4096  Label slow_elements;
4097  Label fast_elements;
4098 
4099  // Get array literal index, array literal and its map.
4100  __ ldr(r4, MemOperand(sp, 0 * kPointerSize));
4101  __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
4102  __ ldr(r2, FieldMemOperand(r1, JSObject::kMapOffset));
4103 
4104  __ CheckFastElements(r2, r5, &double_elements);
4105  // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
4106  __ JumpIfSmi(r0, &smi_element);
4107  __ CheckFastSmiElements(r2, r5, &fast_elements);
4108 
4109  // Storing into the array literal requires an elements transition. Call into
4110  // the runtime.
4111  __ bind(&slow_elements);
4112  // call.
4113  __ Push(r1, r3, r0);
4114  __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4115  __ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset));
4116  __ Push(r5, r4);
4117  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
4118 
4119  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
4120  __ bind(&fast_elements);
4121  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
4122  __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
4123  __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4124  __ str(r0, MemOperand(r6, 0));
4125  // Update the write barrier for the array store.
4126  __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs,
4127  EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
4128  __ Ret();
4129 
4130  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
4131  // and value is Smi.
4132  __ bind(&smi_element);
4133  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
4134  __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
4135  __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize));
4136  __ Ret();
4137 
4138  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
4139  __ bind(&double_elements);
4140  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
4141  __ StoreNumberToDoubleElements(r0, r3, r5, r6, d0, &slow_elements);
4142  __ Ret();
4143 }
4144 
4145 
4146 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
4147  CEntryStub ces(isolate(), 1, kSaveFPRegs);
4148  __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
4149  int parameter_count_offset =
4150  StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
4151  __ ldr(r1, MemOperand(fp, parameter_count_offset));
4152  if (function_mode() == JS_FUNCTION_STUB_MODE) {
4153  __ add(r1, r1, Operand(1));
4154  }
4155  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
4156  __ mov(r1, Operand(r1, LSL, kPointerSizeLog2));
4157  __ add(sp, sp, r1);
4158  __ Ret();
4159 }
4160 
4161 
4162 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
4163  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
4164  VectorLoadStub stub(isolate(), state());
4165  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4166 }
4167 
4168 
4169 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
4170  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
4171  VectorKeyedLoadStub stub(isolate());
4172  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4173 }
4174 
4175 
4176 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
4177  if (masm->isolate()->function_entry_hook() != NULL) {
4178  ProfileEntryHookStub stub(masm->isolate());
4179  int code_size = masm->CallStubSize(&stub) + 2 * Assembler::kInstrSize;
4180  PredictableCodeSizeScope predictable(masm, code_size);
4181  __ push(lr);
4182  __ CallStub(&stub);
4183  __ pop(lr);
4184  }
4185 }
4186 
4187 
4188 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
4189  // The entry hook is a "push lr" instruction, followed by a call.
4190  const int32_t kReturnAddressDistanceFromFunctionStart =
4191  3 * Assembler::kInstrSize;
4192 
4193  // This should contain all kCallerSaved registers.
4194  const RegList kSavedRegs =
4195  1 << 0 | // r0
4196  1 << 1 | // r1
4197  1 << 2 | // r2
4198  1 << 3 | // r3
4199  1 << 5 | // r5
4200  1 << 9; // r9
4201  // We also save lr, so the count here is one higher than the mask indicates.
4202  const int32_t kNumSavedRegs = 7;
4203 
4204  DCHECK((kCallerSaved & kSavedRegs) == kCallerSaved);
4205 
4206  // Save all caller-save registers as this may be called from anywhere.
4207  __ stm(db_w, sp, kSavedRegs | lr.bit());
4208 
4209  // Compute the function's address for the first argument.
4210  __ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart));
4211 
4212  // The caller's return address is above the saved temporaries.
4213  // Grab that for the second argument to the hook.
4214  __ add(r1, sp, Operand(kNumSavedRegs * kPointerSize));
4215 
4216  // Align the stack if necessary.
4217  int frame_alignment = masm->ActivationFrameAlignment();
4218  if (frame_alignment > kPointerSize) {
4219  __ mov(r5, sp);
4220  DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4221  __ and_(sp, sp, Operand(-frame_alignment));
4222  }
4223 
4224 #if V8_HOST_ARCH_ARM
4225  int32_t entry_hook =
4226  reinterpret_cast<int32_t>(isolate()->function_entry_hook());
4227  __ mov(ip, Operand(entry_hook));
4228 #else
4229  // Under the simulator we need to indirect the entry hook through a
4230  // trampoline function at a known address.
4231  // It additionally takes an isolate as a third parameter
4232  __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
4233 
4234  ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
4235  __ mov(ip, Operand(ExternalReference(&dispatcher,
4236  ExternalReference::BUILTIN_CALL,
4237  isolate())));
4238 #endif
4239  __ Call(ip);
4240 
4241  // Restore the stack pointer if needed.
4242  if (frame_alignment > kPointerSize) {
4243  __ mov(sp, r5);
4244  }
4245 
4246  // Also pop pc to get Ret(0).
4247  __ ldm(ia_w, sp, kSavedRegs | pc.bit());
4248 }
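The alignment dance above saves the old stack pointer in r5 and rounds sp down to the ABI frame alignment, which must be a power of two. The same rounding in plain C++, as a sketch:

#include <cstdint>

// Round a stack pointer down to a power-of-two alignment; masking with
// ~(alignment - 1) (equivalently -alignment) clears the low bits.
static uintptr_t AlignDown(uintptr_t sp, uintptr_t frame_alignment) {
  return sp & ~(frame_alignment - 1);
}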
4249 
4250 
4251 template<class T>
4252 static void CreateArrayDispatch(MacroAssembler* masm,
4253  AllocationSiteOverrideMode mode) {
4254  if (mode == DISABLE_ALLOCATION_SITES) {
4255  T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
4256  __ TailCallStub(&stub);
4257  } else if (mode == DONT_OVERRIDE) {
4258  int last_index = GetSequenceIndexFromFastElementsKind(
4259  TERMINAL_FAST_ELEMENTS_KIND);
4260  for (int i = 0; i <= last_index; ++i) {
4261  ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4262  __ cmp(r3, Operand(kind));
4263  T stub(masm->isolate(), kind);
4264  __ TailCallStub(&stub, eq);
4265  }
4266 
4267  // If we reached this point there is a problem.
4268  __ Abort(kUnexpectedElementsKindInArrayConstructor);
4269  } else {
4270  UNREACHABLE();
4271  }
4272 }
4273 
4274 
4275 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
4276  AllocationSiteOverrideMode mode) {
4277  // r2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
4278  // r3 - kind (if mode != DISABLE_ALLOCATION_SITES)
4279  // r0 - number of arguments
4280  // r1 - constructor?
4281  // sp[0] - last argument
4282  Label normal_sequence;
4283  if (mode == DONT_OVERRIDE) {
4284  DCHECK(FAST_SMI_ELEMENTS == 0);
4285  DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
4286  DCHECK(FAST_ELEMENTS == 2);
4287  DCHECK(FAST_HOLEY_ELEMENTS == 3);
4288  DCHECK(FAST_DOUBLE_ELEMENTS == 4);
4289  DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
4290 
4291  // is the low bit set? If so, we are holey and that is good.
4292  __ tst(r3, Operand(1));
4293  __ b(ne, &normal_sequence);
4294  }
4295 
4296  // look at the first argument
4297  __ ldr(r5, MemOperand(sp, 0));
4298  __ cmp(r5, Operand::Zero());
4299  __ b(eq, &normal_sequence);
4300 
4301  if (mode == DISABLE_ALLOCATION_SITES) {
4302  ElementsKind initial = GetInitialFastElementsKind();
4303  ElementsKind holey_initial = GetHoleyElementsKind(initial);
4304 
4305  ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
4306  holey_initial,
4307  DISABLE_ALLOCATION_SITES);
4308  __ TailCallStub(&stub_holey);
4309 
4310  __ bind(&normal_sequence);
4311  ArraySingleArgumentConstructorStub stub(masm->isolate(),
4312  initial,
4313  DISABLE_ALLOCATION_SITES);
4314  __ TailCallStub(&stub);
4315  } else if (mode == DONT_OVERRIDE) {
4316  // We are going to create a holey array, but our kind is non-holey.
4317  // Fix kind and retry (only if we have an allocation site in the slot).
4318  __ add(r3, r3, Operand(1));
4319 
4320  if (FLAG_debug_code) {
4321  __ ldr(r5, FieldMemOperand(r2, 0));
4322  __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
4323  __ Assert(eq, kExpectedAllocationSite);
4324  }
4325 
4326  // Save the resulting elements kind in type info. We can't just store r3
4327  // in the AllocationSite::transition_info field because elements kind is
4328  // restricted to a portion of the field...upper bits need to be left alone.
4329  STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4330  __ ldr(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
4331  __ add(r4, r4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
4332  __ str(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
4333 
4334  __ bind(&normal_sequence);
4335  int last_index = GetSequenceIndexFromFastElementsKind(
4336  TERMINAL_FAST_ELEMENTS_KIND);
4337  for (int i = 0; i <= last_index; ++i) {
4338  ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4339  __ cmp(r3, Operand(kind));
4340  ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
4341  __ TailCallStub(&stub, eq);
4342  }
4343 
4344  // If we reached this point there is a problem.
4345  __ Abort(kUnexpectedElementsKindInArrayConstructor);
4346  } else {
4347  UNREACHABLE();
4348  }
4349 }
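CreateArrayDispatchOneArgument leans on the fast ElementsKind numbering asserted above: packed kinds are even and each holey variant is the next (odd) value, so "is holey" is a test of the low bit and "packed to holey" is an increment. A small illustrative C++ sketch of that convention:

static bool IsHoleyKind(int elements_kind) {
  return (elements_kind & 1) != 0;  // low bit set => holey
}
static int ToHoleyKind(int elements_kind) {
  return IsHoleyKind(elements_kind) ? elements_kind : elements_kind + 1;
}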
4350 
4351 
4352 template<class T>
4353 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
4354  int to_index = GetSequenceIndexFromFastElementsKind(
4355  TERMINAL_FAST_ELEMENTS_KIND);
4356  for (int i = 0; i <= to_index; ++i) {
4357  ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4358  T stub(isolate, kind);
4359  stub.GetCode();
4360  if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
4361  T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
4362  stub1.GetCode();
4363  }
4364  }
4365 }
4366 
4367 
4368 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
4369  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
4370  isolate);
4371  ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
4372  isolate);
4373  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
4374  isolate);
4375 }
4376 
4377 
4378 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
4379  Isolate* isolate) {
4380  ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
4381  for (int i = 0; i < 2; i++) {
4382  // For internal arrays we only need a few things
4383  InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
4384  stubh1.GetCode();
4385  InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
4386  stubh2.GetCode();
4387  InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
4388  stubh3.GetCode();
4389  }
4390 }
4391 
4392 
4393 void ArrayConstructorStub::GenerateDispatchToArrayStub(
4394  MacroAssembler* masm,
4395  AllocationSiteOverrideMode mode) {
4396  if (argument_count() == ANY) {
4397  Label not_zero_case, not_one_case;
4398  __ tst(r0, r0);
4399  __ b(ne, &not_zero_case);
4400  CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4401 
4402  __ bind(&not_zero_case);
4403  __ cmp(r0, Operand(1));
4404  __ b(gt, &not_one_case);
4405  CreateArrayDispatchOneArgument(masm, mode);
4406 
4407  __ bind(&not_one_case);
4408  CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4409  } else if (argument_count() == NONE) {
4410  CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4411  } else if (argument_count() == ONE) {
4412  CreateArrayDispatchOneArgument(masm, mode);
4413  } else if (argument_count() == MORE_THAN_ONE) {
4414  CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4415  } else {
4416  UNREACHABLE();
4417  }
4418 }
4419 
4420 
4421 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
4422  // ----------- S t a t e -------------
4423  // -- r0 : argc (only if argument_count() == ANY)
4424  // -- r1 : constructor
4425  // -- r2 : AllocationSite or undefined
4426  // -- sp[0] : return address
4427  // -- sp[4] : last argument
4428  // -----------------------------------
4429 
4430  if (FLAG_debug_code) {
4431  // The array construct code is only set for the global and natives
4432  // builtin Array functions which always have maps.
4433 
4434  // Initial map for the builtin Array function should be a map.
4435  __ ldr(r4, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
4436  // Will both indicate a NULL and a Smi.
4437  __ tst(r4, Operand(kSmiTagMask));
4438  __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
4439  __ CompareObjectType(r4, r4, r5, MAP_TYPE);
4440  __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
4441 
4442  // We should either have undefined in r2 or a valid AllocationSite
4443  __ AssertUndefinedOrAllocationSite(r2, r4);
4444  }
4445 
4446  Label no_info;
4447  // Get the elements kind and case on that.
4448  __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
4449  __ b(eq, &no_info);
4450 
4451  __ ldr(r3, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
4452  __ SmiUntag(r3);
4453  STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4454  __ and_(r3, r3, Operand(AllocationSite::ElementsKindBits::kMask));
4455  GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
4456 
4457  __ bind(&no_info);
4458  GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
4459 }
4460 
4461 
4462 void InternalArrayConstructorStub::GenerateCase(
4463  MacroAssembler* masm, ElementsKind kind) {
4464  __ cmp(r0, Operand(1));
4465 
4466  InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
4467  __ TailCallStub(&stub0, lo);
4468 
4469  InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
4470  __ TailCallStub(&stubN, hi);
4471 
4472  if (IsFastPackedElementsKind(kind)) {
4473  // We might need to create a holey array
4474  // look at the first argument
4475  __ ldr(r3, MemOperand(sp, 0));
4476  __ cmp(r3, Operand::Zero());
4477 
4478  InternalArraySingleArgumentConstructorStub
4479  stub1_holey(isolate(), GetHoleyElementsKind(kind));
4480  __ TailCallStub(&stub1_holey, ne);
4481  }
4482 
4483  InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
4484  __ TailCallStub(&stub1);
4485 }
4486 
4487 
4488 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
4489  // ----------- S t a t e -------------
4490  // -- r0 : argc
4491  // -- r1 : constructor
4492  // -- sp[0] : return address
4493  // -- sp[4] : last argument
4494  // -----------------------------------
4495 
4496  if (FLAG_debug_code) {
4497  // The array construct code is only set for the global and natives
4498  // builtin Array functions which always have maps.
4499 
4500  // Initial map for the builtin Array function should be a map.
4501  __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
4502  // Will both indicate a NULL and a Smi.
4503  __ tst(r3, Operand(kSmiTagMask));
4504  __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
4505  __ CompareObjectType(r3, r3, r4, MAP_TYPE);
4506  __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
4507  }
4508 
4509  // Figure out the right elements kind
4510  __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
4511  // Load the map's "bit field 2" into |result|. We only need the first byte,
4512  // but the following bit field extraction takes care of that anyway.
4513  __ ldrb(r3, FieldMemOperand(r3, Map::kBitField2Offset));
4514  // Retrieve elements_kind from bit field 2.
4515  __ DecodeField<Map::ElementsKindBits>(r3);
4516 
4517  if (FLAG_debug_code) {
4518  Label done;
4519  __ cmp(r3, Operand(FAST_ELEMENTS));
4520  __ b(eq, &done);
4521  __ cmp(r3, Operand(FAST_HOLEY_ELEMENTS));
4522  __ Assert(eq,
4523  kInvalidElementsKindForInternalArrayOrInternalPackedArray);
4524  __ bind(&done);
4525  }
4526 
4527  Label fast_elements_case;
4528  __ cmp(r3, Operand(FAST_ELEMENTS));
4529  __ b(eq, &fast_elements_case);
4530  GenerateCase(masm, FAST_HOLEY_ELEMENTS);
4531 
4532  __ bind(&fast_elements_case);
4533  GenerateCase(masm, FAST_ELEMENTS);
4534 }
4535 
4536 
4537 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
4538  // ----------- S t a t e -------------
4539  // -- r0 : callee
4540  // -- r4 : call_data
4541  // -- r2 : holder
4542  // -- r1 : api_function_address
4543  // -- cp : context
4544  // --
4545  // -- sp[0] : last argument
4546  // -- ...
4547  // -- sp[(argc - 1)* 4] : first argument
4548  // -- sp[argc * 4] : receiver
4549  // -----------------------------------
4550 
4551  Register callee = r0;
4552  Register call_data = r4;
4553  Register holder = r2;
4554  Register api_function_address = r1;
4555  Register context = cp;
4556 
4557  int argc = this->argc();
4558  bool is_store = this->is_store();
4559  bool call_data_undefined = this->call_data_undefined();
4560 
4561  typedef FunctionCallbackArguments FCA;
4562 
4563  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
4564  STATIC_ASSERT(FCA::kCalleeIndex == 5);
4565  STATIC_ASSERT(FCA::kDataIndex == 4);
4566  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
4567  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
4568  STATIC_ASSERT(FCA::kIsolateIndex == 1);
4569  STATIC_ASSERT(FCA::kHolderIndex == 0);
4570  STATIC_ASSERT(FCA::kArgsLength == 7);
4571 
4572  // context save
4573  __ push(context);
4574  // load context from callee
4575  __ ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
4576 
4577  // callee
4578  __ push(callee);
4579 
4580  // call data
4581  __ push(call_data);
4582 
4583  Register scratch = call_data;
4584  if (!call_data_undefined) {
4585  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4586  }
4587  // return value
4588  __ push(scratch);
4589  // return value default
4590  __ push(scratch);
4591  // isolate
4592  __ mov(scratch,
4593  Operand(ExternalReference::isolate_address(isolate())));
4594  __ push(scratch);
4595  // holder
4596  __ push(holder);
4597 
4598  // Prepare arguments.
4599  __ mov(scratch, sp);
4600 
4601  // Allocate the v8::Arguments structure in the arguments' space since
4602  // it's not controlled by GC.
4603  const int kApiStackSpace = 4;
4604 
4605  FrameScope frame_scope(masm, StackFrame::MANUAL);
4606  __ EnterExitFrame(false, kApiStackSpace);
4607 
4608  DCHECK(!api_function_address.is(r0) && !scratch.is(r0));
4609  // r0 = FunctionCallbackInfo&
4610  // Arguments is after the return address.
4611  __ add(r0, sp, Operand(1 * kPointerSize));
4612  // FunctionCallbackInfo::implicit_args_
4613  __ str(scratch, MemOperand(r0, 0 * kPointerSize));
4614  // FunctionCallbackInfo::values_
4615  __ add(ip, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
4616  __ str(ip, MemOperand(r0, 1 * kPointerSize));
4617  // FunctionCallbackInfo::length_ = argc
4618  __ mov(ip, Operand(argc));
4619  __ str(ip, MemOperand(r0, 2 * kPointerSize));
4620  // FunctionCallbackInfo::is_construct_call = 0
4621  __ mov(ip, Operand::Zero());
4622  __ str(ip, MemOperand(r0, 3 * kPointerSize));
4623 
4624  const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
4625  ExternalReference thunk_ref =
4626  ExternalReference::invoke_function_callback(isolate());
4627 
4628  AllowExternalCallThatCantCauseGC scope(masm);
4629  MemOperand context_restore_operand(
4630  fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
4631  // Stores return the first js argument
4632  int return_value_offset = 0;
4633  if (is_store) {
4634  return_value_offset = 2 + FCA::kArgsLength;
4635  } else {
4636  return_value_offset = 2 + FCA::kReturnValueOffset;
4637  }
4638  MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
4639 
4640  __ CallApiFunctionAndReturn(api_function_address,
4641  thunk_ref,
4642  kStackUnwindSpace,
4643  return_value_operand,
4644  &context_restore_operand);
4645 }
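The pushes at the top of CallApiFunctionStub::Generate build the implicit-args block whose layout is pinned down by the STATIC_ASSERTs above; values are pushed from the context save down to the holder, so after the pushes sp points at index 0. A sketch of those indices in C++, mirroring the asserts (for reference only, not part of the stub):

enum FcaIndex {              // offset in words above sp after the pushes
  kFcaHolder = 0,            // pushed last
  kFcaIsolate = 1,
  kFcaReturnValueDefault = 2,
  kFcaReturnValue = 3,
  kFcaData = 4,
  kFcaCallee = 5,
  kFcaContextSave = 6,       // pushed first
  kFcaArgsLength = 7
};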
4646 
4647 
4648 void CallApiGetterStub::Generate(MacroAssembler* masm) {
4649  // ----------- S t a t e -------------
4650  // -- sp[0] : name
4651  // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
4652  // -- ...
4653  // -- r2 : api_function_address
4654  // -----------------------------------
4655 
4656  Register api_function_address = ApiGetterDescriptor::function_address();
4657  DCHECK(api_function_address.is(r2));
4658 
4659  __ mov(r0, sp); // r0 = Handle<Name>
4660  __ add(r1, r0, Operand(1 * kPointerSize)); // r1 = PCA
4661 
4662  const int kApiStackSpace = 1;
4663  FrameScope frame_scope(masm, StackFrame::MANUAL);
4664  __ EnterExitFrame(false, kApiStackSpace);
4665 
4666  // Create PropertyAccessorInfo instance on the stack above the exit frame with
4667  // r1 (internal::Object** args_) as the data.
4668  __ str(r1, MemOperand(sp, 1 * kPointerSize));
4669  __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
4670 
4671  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
4672 
4673  ExternalReference thunk_ref =
4674  ExternalReference::invoke_accessor_getter_callback(isolate());
4675  __ CallApiFunctionAndReturn(api_function_address,
4676  thunk_ref,
4677  kStackUnwindSpace,
4678  MemOperand(fp, 6 * kPointerSize),
4679  NULL);
4680 }
4681 
4682 
4683 #undef __
4684 
4685 } } // namespace v8::internal
4686 
4687 #endif // V8_TARGET_ARCH_ARM
#define kFirstCalleeSavedDoubleReg
#define kDoubleRegZero
#define kScratchDoubleReg
#define kLastCalleeSavedDoubleReg
static AllocationSiteMode GetMode(ElementsKind boilerplate_elements_kind)
Definition: objects-inl.h:1591
static const int kTransitionInfoOffset
Definition: objects.h:8254
static const Register function_address()
void GenerateReadElement(MacroAssembler *masm)
void GenerateNewSloppySlow(MacroAssembler *masm)
void GenerateNewStrict(MacroAssembler *masm)
void GenerateNewSloppyFast(MacroAssembler *masm)
static void GenerateStubsAheadOfTime(Isolate *isolate)
void GenerateDispatchToArrayStub(MacroAssembler *masm, AllocationSiteOverrideMode mode)
ArgumentCountKey argument_count() const
Definition: code-stubs.h:732
static const int kInstrSize
friend class BlockConstPoolScope
static int GetBranchOffset(Instr instr)
static void GenerateAheadOfTime(Isolate *isolate)
Definition: code-stubs.cc:266
static const U kShift
Definition: utils.h:204
static const U kMask
Definition: utils.h:203
bool save_doubles() const
Definition: code-stubs.h:1423
static void GenerateAheadOfTime(Isolate *isolate)
CEntryStub(Isolate *isolate, int result_size, SaveFPRegsMode save_doubles=kDontSaveFPRegs)
Definition: code-stubs.h:1406
STATIC_ASSERT(Code::kArgumentsBits+2<=kStubMinorKeyBits)
bool CallAsMethod() const
Definition: code-stubs.h:811
void GenerateMiss(MacroAssembler *masm)
virtual InlineCacheState GetICState() const OVERRIDE
Definition: code-stubs.h:804
static const int kValueOffset
Definition: objects.h:9446
static const int kHeaderSize
Definition: objects.h:5373
Condition GetCondition() const
Definition: code-stubs.cc:354
void GenerateInternalizedStrings(MacroAssembler *masm)
void GenerateStrings(MacroAssembler *masm)
CompareICState::State state() const
Definition: code-stubs.h:1278
Token::Value op() const
Definition: code-stubs.h:1268
void GenerateMiss(MacroAssembler *masm)
CompareICState::State left() const
Definition: code-stubs.h:1272
void GenerateGeneric(MacroAssembler *masm)
CompareICState::State right() const
Definition: code-stubs.h:1275
void GenerateObjects(MacroAssembler *masm)
CompareICStub(Isolate *isolate, Token::Value op, CompareICState::State left, CompareICState::State right, CompareICState::State state)
Definition: code-stubs.h:1256
void GenerateNumbers(MacroAssembler *masm)
void GenerateUniqueNames(MacroAssembler *masm)
void GenerateKnownObjects(MacroAssembler *masm)
void GenerateSmis(MacroAssembler *masm)
static const int kFirstOffset
Definition: objects.h:9061
static const int kMinLength
Definition: objects.h:9066
static const int kSecondOffset
Definition: objects.h:9062
static int SlotOffset(int index)
Definition: contexts.h:552
static void GenerateAheadOfTime(Isolate *isolate)
Definition: code-stubs.cc:725
void GenerateCall(MacroAssembler *masm, Register target)
Register source() const
Definition: code-stubs.h:1901
STATIC_ASSERT((1L<< kBitsPerRegisterNumber) >=Register::kNumRegisters)
Register destination() const
Definition: code-stubs.h:1904
static const int kCallerFPOffset
Definition: frames-arm.h:80
static const int kMaxShortLength
Definition: objects.h:9141
static const int kResourceDataOffset
Definition: objects.h:9138
static const int kLengthOffset
Definition: objects.h:2392
static const int kHeaderSize
Definition: objects.h:2393
static const int kNativeContextOffset
Definition: objects.h:7459
static const int kEntrySize
Definition: objects.h:3276
static const uint32_t kSignMask
Definition: objects.h:1522
static const int kValueOffset
Definition: objects.h:1506
static const int kMantissaBitsInTopWord
Definition: objects.h:1531
static const int kExponentBits
Definition: objects.h:1526
static const int kExponentBias
Definition: objects.h:1527
static const int kExponentShift
Definition: objects.h:1528
static const int kNonMantissaBitsInTopWord
Definition: objects.h:1532
static const int kMapOffset
Definition: objects.h:1427
static const int kStrictArgumentsObjectSize
Definition: heap.h:674
static const int kSloppyArgumentsObjectSize
Definition: heap.h:671
static const int kArgumentsCalleeIndex
Definition: heap.h:679
static const int kArgumentsLengthIndex
Definition: heap.h:677
void GenerateLightweightMiss(MacroAssembler *masm, ExternalReference miss)
bool HasCallSiteInlineCheck() const
Definition: code-stubs.h:700
bool HasArgsInRegisters() const
Definition: code-stubs.h:698
bool ReturnTrueFalseObject() const
Definition: code-stubs.h:704
static void GenerateStubsAheadOfTime(Isolate *isolate)
void GenerateCase(MacroAssembler *masm, ElementsKind kind)
static const int kJSRegexpStaticOffsetsVectorSize
Definition: isolate.h:984
StackFrame::Type type() const
Definition: code-stubs.h:1454
static const int kSharedFunctionInfoOffset
Definition: objects.h:7379
static const int kContextOffset
Definition: objects.h:7381
static const int kLiteralsOffset
Definition: objects.h:7382
static const int kPrototypeOrInitialMapOffset
Definition: objects.h:7377
static const int kHeaderSize
Definition: objects.h:2195
static const int kPropertiesOffset
Definition: objects.h:2193
static const int kElementsOffset
Definition: objects.h:2194
static const int kDataOneByteCodeOffset
Definition: objects.h:7813
static const int kIrregexpCaptureCountOffset
Definition: objects.h:7817
static const int kDataTagOffset
Definition: objects.h:7811
static const int kDataOffset
Definition: objects.h:7771
static const int kDataUC16CodeOffset
Definition: objects.h:7815
static const Register ReceiverRegister()
static const Register NameRegister()
static int ActivationFrameAlignment()
static const int kIsUndetectable
Definition: objects.h:6244
static const int kBitFieldOffset
Definition: objects.h:6228
static const int kInstanceTypeOffset
Definition: objects.h:6229
static const int kBitField2Offset
Definition: objects.h:6233
static const int kPrototypeOffset
Definition: objects.h:6190
ExponentType exponent_type() const
Definition: code-stubs.h:780
static const Register exponent()
static const size_t kWriteBarrierCounterOffset
Definition: spaces.h:536
static const int kEvacuationCandidateMask
Definition: spaces.h:398
static const int kSkipEvacuationSlotsRecordingMask
Definition: spaces.h:400
NameDictionaryLookupStub(Isolate *isolate, LookupMode mode)
static void GeneratePositiveLookup(MacroAssembler *masm, Label *miss, Label *done, Register elements, Register name, Register r0, Register r1)
static void GenerateNegativeLookup(MacroAssembler *masm, Label *miss, Label *done, Register receiver, Register properties, Handle< Name > name, Register scratch0)
static const int kHashShift
Definition: objects.h:8499
static const int kHashFieldOffset
Definition: objects.h:8486
static void GenerateLoadFunctionPrototype(MacroAssembler *masm, Register receiver, Register scratch1, Register scratch2, Label *miss_label)
static const intptr_t kPageAlignmentMask
Definition: spaces.h:757
virtual void Generate(MacroAssembler *masm)=0
ProfileEntryHookStub(Isolate *isolate)
Definition: code-stubs.h:2373
static void MaybeCallEntryHook(MacroAssembler *masm)
static void EntryHookTrampoline(intptr_t function, intptr_t stack_pointer, Isolate *isolate)
Definition: code-stubs.cc:925
void SaveCallerSaveRegisters(MacroAssembler *masm, SaveFPRegsMode mode)
void RestoreCallerSaveRegisters(MacroAssembler *masm, SaveFPRegsMode mode)
void GenerateIncremental(MacroAssembler *masm, Mode mode)
void InformIncrementalMarker(MacroAssembler *masm)
RememberedSetAction remembered_set_action() const
static void PatchBranchIntoNop(MacroAssembler *masm, int pos)
SaveFPRegsMode save_fp_regs_mode() const
void CheckNeedsToInformIncrementalMarker(MacroAssembler *masm, OnNoNeedToInformIncrementalMarker on_no_need, Mode mode)
virtual void Generate(MacroAssembler *masm) OVERRIDE
static const int kLastCaptureCountOffset
Definition: jsregexp.h:168
static const int kLastSubjectOffset
Definition: jsregexp.h:170
static const int kLastMatchOverhead
Definition: jsregexp.h:165
static const int kLastInputOffset
Definition: jsregexp.h:172
static const int kFirstCaptureOffset
Definition: jsregexp.h:174
static const Function * FunctionForId(FunctionId id)
Definition: runtime.cc:9312
static const int kHeaderSize
Definition: objects.h:8941
static const int kConstructStubOffset
Definition: objects.h:6896
static const int kFeedbackVectorOffset
Definition: objects.h:6904
static const int kCompilerHintsOffset
Definition: objects.h:6961
static const int kMinLength
Definition: objects.h:9109
static const int kParentOffset
Definition: objects.h:9104
static const int kOffsetOffset
Definition: objects.h:9105
static Smi * FromInt(int value)
Definition: objects-inl.h:1321
static const int kContextOffset
Definition: frames.h:162
static const int kCallerSPOffset
Definition: frames.h:167
static const int kCallerFPOffset
Definition: frames.h:165
static void GenerateFixedRegStubsAheadOfTime(Isolate *isolate)
StoreBufferOverflowStub(Isolate *isolate, SaveFPRegsMode save_fp)
Definition: code-stubs.h:2395
void GenerateFast(MacroAssembler *masm)
void GenerateSlow(MacroAssembler *masm, const RuntimeCallHelper &call_helper)
void GenerateFast(MacroAssembler *masm)
void GenerateSlow(MacroAssembler *masm, const RuntimeCallHelper &call_helper)
static void Generate(MacroAssembler *masm, Register string, Register index, Register result, Label *call_runtime)
static void GenerateOneByteCharsCompareLoop(MacroAssembler *masm, Register left, Register right, Register length, Register scratch1, Register scratch2, Label *chars_not_equal)
static void GenerateCompareFlatOneByteStrings(MacroAssembler *masm, Register left, Register right, Register scratch1, Register scratch2, Register scratch3, Register scratch4)
static void GenerateCopyCharacters(MacroAssembler *masm, Register dest, Register src, Register count, Register scratch, String::Encoding encoding)
static void GenerateFlatOneByteStringEquals(MacroAssembler *masm, Register left, Register right, Register scratch1, Register scratch2, Register scratch3)
static const int32_t kMaxOneByteCharCode
Definition: objects.h:8811
static const int kLengthOffset
Definition: objects.h:8802
static const int kCallerStackParameterCountFrameOffset
Definition: frames.h:755
StubFunctionMode function_mode() const
Definition: code-stubs.h:2360
static void GenerateAheadOfTime(Isolate *isolate)
Definition: code-stubs.cc:917
static bool IsOrderedRelationalCompareOp(Value op)
Definition: token.h:206
static bool IsEqualityOp(Value op)
Definition: token.h:210
static Handle< Object > UninitializedSentinel(Isolate *isolate)
static Handle< Object > MegamorphicSentinel(Isolate *isolate)
static const Register VectorRegister()
void Generate(MacroAssembler *masm)
static void GenerateFixedRegStubsAheadOfTime(Isolate *isolate)
WriteInt32ToHeapNumberStub(Isolate *isolate, Register the_int, Register the_heap_number, Register scratch)
#define __
DEFINE_BOOL(enable_always_align_csp, ...) and other runtime flag definitions (flag-definitions.h); macro-expanded flag help text omitted
#define V8_INFINITY
Definition: globals.h:25
#define FUNCTION_ADDR(f)
Definition: globals.h:195
#define UNREACHABLE()
Definition: logging.h:30
#define DCHECK(condition)
Definition: logging.h:205
#define DCHECK_EQ(v1, v2)
Definition: logging.h:206
@ JUMP_FUNCTION
@ CALL_FUNCTION
AllocationFlags
@ SIZE_IN_WORDS
@ TAG_OBJECT
int int32_t
Definition: unicode.cc:24
bool IsPowerOfTwo32(uint32_t value)
Definition: bits.h:77
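IsPowerOfTwo32 shows up in the stubs mostly inside DCHECKs, e.g. to assert that an alignment value can be tested by masking. A minimal sketch, assuming the surrounding V8 headers (src/base/bits.h, src/base/logging.h); the alignment value below is hypothetical, not taken from this file:
  uint32_t frame_alignment = 8;  // example value for illustration only
  // A power-of-two alignment lets the code align with a single mask
  // against (alignment - 1).
  DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));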
static int Push(SpecialRPOStackFrame *stack, int depth, BasicBlock *child, int unvisited)
Definition: scheduler.cc:773
const int kPointerSize
Definition: globals.h:129
const LowDwVfpRegister d2
const uint32_t kStringEncodingMask
Definition: objects.h:555
const Register r2
@ DONT_DO_SMI_CHECK
Definition: globals.h:640
const LowDwVfpRegister d7
ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_number)
@ DONT_TRACK_ALLOCATION_SITE
Definition: objects.h:8084
@ kSeqStringTag
Definition: objects.h:563
@ kConsStringTag
Definition: objects.h:564
@ kSlicedStringTag
Definition: objects.h:566
@ kExternalStringTag
Definition: objects.h:565
const LowDwVfpRegister d6
const Register cp
const intptr_t kPointerAlignmentMask
Definition: globals.h:231
const LowDwVfpRegister d1
const Register r6
const uint32_t kTwoByteStringTag
Definition: objects.h:556
const Register r0
const uint32_t kShortExternalStringTag
Definition: objects.h:590
const RegList kCalleeSaved
Definition: frames-arm.h:38
const int kSmiTagSize
Definition: v8.h:5743
const LowDwVfpRegister d0
const int kFastElementsKindPackedToHoley
Definition: elements-kind.h:71
const Register ip
const int kDoubleSize
Definition: globals.h:127
const uint32_t kNotStringTag
Definition: objects.h:545
const Register r3
const Register fp
@ JS_FUNCTION_STUB_MODE
Definition: code-stubs.h:350
const Register sp
const int kPointerSizeLog2
Definition: globals.h:147
const uint32_t kStringTag
Definition: objects.h:544
@ JS_REGEXP_TYPE
Definition: objects.h:748
@ JS_ARRAY_TYPE
Definition: objects.h:738
@ FIXED_ARRAY_TYPE
Definition: objects.h:717
@ JS_OBJECT_TYPE
Definition: objects.h:731
@ FIRST_NONSTRING_TYPE
Definition: objects.h:758
@ ODDBALL_TYPE
Definition: objects.h:663
@ FIRST_SPEC_OBJECT_TYPE
Definition: objects.h:781
@ LAST_SPEC_OBJECT_TYPE
Definition: objects.h:782
@ HEAP_NUMBER_TYPE
Definition: objects.h:669
@ JS_FUNCTION_TYPE
Definition: objects.h:749
@ JS_FUNCTION_PROXY_TYPE
Definition: objects.h:726
@ FAST_HOLEY_DOUBLE_ELEMENTS
Definition: elements-kind.h:27
@ TERMINAL_FAST_ELEMENTS_KIND
Definition: elements-kind.h:63
@ FAST_HOLEY_SMI_ELEMENTS
Definition: elements-kind.h:17
Handle< T > handle(T *t, Isolate *isolate)
Definition: handles.h:146
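handle() wraps a raw heap pointer in a Handle scoped to the given isolate, so the pointer stays valid across allocating operations. A usage sketch, assuming V8 internal headers; raw_object is a hypothetical HeapObject*:
  // Wrap the object's map in a handle before any call that may allocate.
  Handle<Map> map = handle(raw_object->map(), isolate);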
const uint32_t kOneByteStringTag
Definition: objects.h:557
const Register r4
MemOperand FieldMemOperand(Register object, int offset)
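FieldMemOperand builds a MemOperand that subtracts the heap-object tag from the field offset, which is how the stubs in this file address object fields. An illustrative line, with __ standing for ACCESS_MASM(masm) as in the rest of the file; the register choice is arbitrary:
  // Load the map of the tagged object in r0 into r2.
  __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));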
const intptr_t kObjectAlignmentMask
Definition: globals.h:227
int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind)
bool IsFastPackedElementsKind(ElementsKind kind)
const Register r9
const Register pc
const uint32_t kShortExternalStringMask
Definition: objects.h:589
ElementsKind GetHoleyElementsKind(ElementsKind packed_kind)
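The elements-kind helpers above convert between packed fast kinds and their holey counterparts; the array constructor stubs rely on that mapping when transitioning an allocation site. A small sketch of the expected behaviour, assuming src/elements-kind.h:
  // FAST_SMI_ELEMENTS is packed; its holey counterpart is FAST_HOLEY_SMI_ELEMENTS.
  ElementsKind holey = GetHoleyElementsKind(FAST_SMI_ELEMENTS);
  DCHECK_EQ(FAST_HOLEY_SMI_ELEMENTS, holey);
  DCHECK(!IsFastPackedElementsKind(holey));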
AllocationSiteOverrideMode
Definition: code-stubs.h:716
@ DISABLE_ALLOCATION_SITES
Definition: code-stubs.h:718
const Register r5
Condition NegateCondition(Condition cond)
Definition: constants-arm.h:86
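NegateCondition maps an ARM condition code to its logical inverse (eq to ne, lt to ge, and so on), letting a stub branch on the opposite outcome of a compare without redoing it. A brief sketch:
  Condition cond = eq;
  Condition inverted = NegateCondition(cond);
  DCHECK_EQ(ne, inverted);  // the inverse of "equal" is "not equal"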
const uint32_t kStringRepresentationMask
Definition: objects.h:561
uint32_t RegList
Definition: frames.h:18
const Register lr
const SwVfpRegister s6
byte * Address
Definition: globals.h:101
const uint32_t kSlicedNotConsMask
Definition: objects.h:579
const Register r8
const Register r1
const int kIntSize
Definition: globals.h:124
const int kHeapObjectTag
Definition: v8.h:5737
const int kSmiShiftSize
Definition: v8.h:5805
const Register no_reg
const uint32_t kInternalizedTag
Definition: objects.h:551
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
const intptr_t kSmiTagMask
Definition: v8.h:5744
const uint32_t kIsNotInternalizedMask
Definition: objects.h:549
const int kSmiTag
Definition: v8.h:5742
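The Smi tagging constants listed here (kSmiTag, kSmiTagSize, kSmiTagMask) encode that small integers have a clear low bit on 32-bit ARM, so a single tst classifies a value. A sketch of the usual check; the register and label names are arbitrary and __ is ACCESS_MASM(masm) as elsewhere in this file:
  // kSmiTag == 0 and kSmiTagMask == 1: the value is a Smi iff bit 0 is clear.
  Label is_smi;
  __ tst(r0, Operand(kSmiTagMask));
  __ b(eq, &is_smi);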
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2=no_reg, Register reg3=no_reg, Register reg4=no_reg, Register reg5=no_reg, Register reg6=no_reg)
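GetRegisterThatIsNotOneOf returns an allocatable register different from all of its arguments, which is useful when a stub needs an extra scratch register without clobbering its inputs. A minimal sketch; the particular registers are just examples:
  // Pick a scratch register guaranteed to differ from r0 and r1.
  Register scratch = GetRegisterThatIsNotOneOf(r0, r1);
  DCHECK(!scratch.is(r0) && !scratch.is(r1));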
const LowDwVfpRegister d3
const uint32_t kIsNotStringMask
Definition: objects.h:543
const int kNumCalleeSaved
Definition: frames-arm.h:58
const int kNumDoubleCalleeSaved
Definition: frames-arm.h:61
ElementsKind GetInitialFastElementsKind()
Definition: elements-kind.h:78
@ STRING_INDEX_IS_NUMBER
Definition: code-stubs.h:1590
@ STRING_INDEX_IS_ARRAY_INDEX
Definition: code-stubs.h:1595
const uint32_t kIsIndirectStringMask
Definition: objects.h:568
const RegList kCallerSaved
Definition: frames-arm.h:50
const Register r7
Debugger support for the V8 JavaScript engine.
Definition: accessors.cc:20
static Handle< Value > Throw(Isolate *isolate, const char *message)
Definition: d8.cc:72
bool is(Register reg) const
#define T(name, string, precedence)
Definition: token.cc:25