ic-arm.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_ARM

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// Static IC stub generators.
//

#define __ ACCESS_MASM(masm)
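// ACCESS_MASM(masm) expands to "masm->", so every "__ op(...)" below is a
// call on the MacroAssembler that was passed to the generator.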


static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
                                            Label* global_object) {
  // Register usage:
  //   type: holds the receiver instance type on entry.
  __ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE));
  __ b(eq, global_object);
  __ cmp(type, Operand(JS_BUILTINS_OBJECT_TYPE));
  __ b(eq, global_object);
  __ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE));
  __ b(eq, global_object);
}


// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done. Can be the same as elements or name, clobbering
//           one of these in the case of not jumping to the miss label.
// The two scratch registers need to be different from elements, name and
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
                                   Register elements, Register name,
                                   Register result, Register scratch1,
                                   Register scratch2) {
  // Main use of the scratch registers.
  //  scratch1: Used as temporary and to hold the capacity of the property
  //            dictionary.
  //  scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry check that the value is a normal
  // property.
  __ bind(&done);  // scratch2 == elements + 4 * index.
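  // A NameDictionary entry occupies three consecutive pointer-sized slots:
  // (key, value, details). The offsets below address the value and details
  // words relative to the entry's key slot. The details word is a smi,
  // which is why the type mask is shifted left by kSmiTagSize.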
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ tst(scratch1, Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
  __ b(ne, miss);

  // Get the value at the masked, scaled index and return.
  __ ldr(result,
         FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}


// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store.
// The two scratch registers need to be different from elements, name and
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
                                    Register elements, Register name,
                                    Register value, Register scratch1,
                                    Register scratch2) {
  // Main use of the scratch registers.
  //  scratch1: Used as temporary and to hold the capacity of the property
  //            dictionary.
  //  scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry in the dictionary check that the value
  // is a normal property that is not read only.
  __ bind(&done);  // scratch2 == elements + 4 * index.
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  const int kTypeAndReadOnlyMask =
      (PropertyDetails::TypeField::kMask |
       PropertyDetails::AttributesField::encode(READ_ONLY))
      << kSmiTagSize;
  __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ tst(scratch1, Operand(kTypeAndReadOnlyMask));
  __ b(ne, miss);

  // Store the value at the masked, scaled index and return.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
  __ str(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ mov(scratch1, value);
  __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
                 kDontSaveFPRegs);
}


// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver, Register map,
                                           Register scratch,
                                           int interceptor_bit, Label* slow) {
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);
  // Get the map of the receiver.
  __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check bit field.
  __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
  __ tst(scratch,
         Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
  __ b(ne, slow);
  // Check that the object is some kind of JS object EXCEPT JS Value type.
  // In the case that the object is a value-wrapper object,
  // we enter the runtime system to make sure that indexing into string
  // objects works as intended.
  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
  __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  __ cmp(scratch, Operand(JS_OBJECT_TYPE));
  __ b(lt, slow);
}


// Loads an indexed element from a fast case array.
// If not_fast_array is NULL, doesn't perform the elements map check.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
                                  Register key, Register elements,
                                  Register scratch1, Register scratch2,
                                  Register result, Label* not_fast_array,
                                  Label* out_of_range) {
  // Register use:
  //
  // receiver - holds the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // elements - holds the elements of the receiver on exit.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'receiver' or 'key'.
  //            Unchanged on bailout so 'receiver' and 'key' can be safely
  //            used by further computation.
  //
  // Scratch registers:
  //
  // scratch1 - used to hold elements map and elements length.
  //            Holds the elements map if not_fast_array branch is taken.
  //
  // scratch2 - used to hold the loaded value.

  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  if (not_fast_array != NULL) {
    // Check that the object is in fast mode and writable.
    __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
    __ cmp(scratch1, ip);
    __ b(ne, not_fast_array);
  } else {
    __ AssertFastElements(elements);
  }
  // Check that the key (index) is within bounds.
  __ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmp(key, Operand(scratch1));
  __ b(hs, out_of_range);
  // Fast case: Do the load.
  __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
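  // Heap object pointers are tagged (kHeapObjectTag is set in the low bits),
  // so subtracting the tag yields the raw address of the first element for
  // the indexed load below.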
  __ ldr(scratch2, MemOperand::PointerAddressFromSmiKey(scratch1, key));
  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  __ cmp(scratch2, ip);
  // In case the loaded value is the_hole we have to consult GetProperty
  // to ensure the prototype chain is searched.
  __ b(eq, out_of_range);
  __ mov(result, scratch2);
}


// Checks whether a key is an array index string or a unique name.
// Falls through if a key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
                                 Register map, Register hash,
                                 Label* index_string, Label* not_unique) {
  // The key is not a smi.
  Label unique;
  // Is it a name?
  __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
  __ b(hi, not_unique);
  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
  __ b(eq, &unique);

  // Is the string an array index, with cached numeric value?
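  // The hash field of a Name caches small array indices. If the "contains
  // cached array index" bits are clear, the index can be extracted straight
  // from the hash field (see IndexFromHash).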
  __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
  __ tst(hash, Operand(Name::kContainsCachedArrayIndexMask));
  __ b(eq, index_string);

  // Is the string internalized? We know it's a string, so a single
  // bit test is enough.
  // map: key map
  __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0);
  __ tst(hash, Operand(kIsNotInternalizedMask));
  __ b(ne, not_unique);

  __ bind(&unique);
}


void LoadIC::GenerateNormal(MacroAssembler* masm) {
  Register dictionary = r0;
  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));

  Label slow;

  __ ldr(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
                                     JSObject::kPropertiesOffset));
  GenerateDictionaryLoad(masm, &slow, dictionary,
                         LoadDescriptor::NameRegister(), r0, r3, r4);
  __ Ret();

  // Dictionary load failed, go slow (but don't miss).
  __ bind(&slow);
  GenerateRuntimeGetProperty(masm);
}


// A register that isn't one of the parameters to the load ic.
static const Register LoadIC_TempRegister() { return r3; }


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in lr.
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->load_miss(), 1, r3, r4);

  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());

  // Perform tail call to the entry.
  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
  __ TailCallExternalReference(ref, 2, 1);
}


void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in lr.

  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());

  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}


static MemOperand GenerateMappedArgumentsLookup(
    MacroAssembler* masm, Register object, Register key, Register scratch1,
    Register scratch2, Register scratch3, Label* unmapped_case,
    Label* slow_case) {
  Heap* heap = masm->isolate()->heap();

  // Check that the receiver is a JSObject. Because of the map check
  // later, we do not need to check for interceptors or whether it
  // requires access checks.
  __ JumpIfSmi(object, slow_case);
  // Check that the object is some kind of JSObject.
  __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_RECEIVER_TYPE);
  __ b(lt, slow_case);

  // Check that the key is a positive smi.
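  // A smi has a zero tag in bit 0 and uses bit 31 as the sign bit, so a
  // single tst against 0x80000001 rejects both non-smis and negative smis
  // in one instruction.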
  __ tst(key, Operand(0x80000001));
  __ b(ne, slow_case);

  // Load the elements into scratch1 and check its map.
  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
  __ ldr(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
  __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);

  // Check if element is in the range of mapped arguments. If not, jump
  // to the unmapped lookup with the parameter map in scratch1.
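  // The first two slots of a parameter map hold the context and the
  // arguments backing store, so the number of mapped parameters is the
  // length minus 2.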
  __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
  __ sub(scratch2, scratch2, Operand(Smi::FromInt(2)));
  __ cmp(key, Operand(scratch2));
  __ b(cs, unmapped_case);

  // Load element index and check whether it is the hole.
  const int kOffset =
      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;

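  // The key is a smi, i.e. the index shifted left by one, so multiplying
  // by kPointerSize / 2 produces the byte offset index * kPointerSize
  // without untagging first.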
  __ mov(scratch3, Operand(kPointerSize >> 1));
  __ mul(scratch3, key, scratch3);
  __ add(scratch3, scratch3, Operand(kOffset));

  __ ldr(scratch2, MemOperand(scratch1, scratch3));
  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
  __ cmp(scratch2, scratch3);
  __ b(eq, unmapped_case);

  // Load value from context and return it. We can reuse scratch1 because
  // we do not jump to the unmapped lookup (which requires the parameter
  // map in scratch1).
  __ ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
  __ mov(scratch3, Operand(kPointerSize >> 1));
  __ mul(scratch3, scratch2, scratch3);
  __ add(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
  return MemOperand(scratch1, scratch3);
}


static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
                                                  Register key,
                                                  Register parameter_map,
                                                  Register scratch,
                                                  Label* slow_case) {
  // Element is in arguments backing store, which is referenced by the
  // second element of the parameter_map. The parameter_map register
  // must be loaded with the parameter map of the arguments object and is
  // overwritten.
  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
  Register backing_store = parameter_map;
  __ ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
  __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
              DONT_DO_SMI_CHECK);
  __ ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
  __ cmp(key, Operand(scratch));
  __ b(cs, slow_case);
  __ mov(scratch, Operand(kPointerSize >> 1));
  __ mul(scratch, key, scratch);
  __ add(scratch, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  return MemOperand(backing_store, scratch);
}


void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register key = StoreDescriptor::NameRegister();
  Register value = StoreDescriptor::ValueRegister();
  DCHECK(receiver.is(r1));
  DCHECK(key.is(r2));
  DCHECK(value.is(r0));

  Label slow, notin;
  MemOperand mapped_location = GenerateMappedArgumentsLookup(
      masm, receiver, key, r3, r4, r5, &notin, &slow);
  __ str(value, mapped_location);
  __ add(r6, r3, r5);
  __ mov(r9, value);
  __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret();
  __ bind(&notin);
  // The unmapped lookup expects that the parameter map is in r3.
  MemOperand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, key, r3, r4, &slow);
  __ str(value, unmapped_location);
  __ add(r6, r3, r4);
  __ mov(r9, value);
  __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret();
  __ bind(&slow);
  GenerateMiss(masm);
}


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in lr.
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r3, r4);

  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);

  __ TailCallExternalReference(ref, 2, 1);
}


void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in lr.

  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());

  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}


void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  // The return address is in lr.
  Label slow, check_name, index_smi, index_name, property_array_property;
  Label probe_dictionary, check_number_dictionary;

  Register key = LoadDescriptor::NameRegister();
  Register receiver = LoadDescriptor::ReceiverRegister();
  DCHECK(key.is(r2));
  DCHECK(receiver.is(r1));

  Isolate* isolate = masm->isolate();

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &check_name);
  __ bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.

  GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3,
                                 Map::kHasIndexedInterceptor, &slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(r0, r3, &check_number_dictionary);

  GenerateFastArrayLoad(masm, receiver, key, r0, r3, r4, r0, NULL, &slow);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r4, r3);
  __ Ret();

  __ bind(&check_number_dictionary);
  __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ ldr(r3, FieldMemOperand(r4, HeapObject::kMapOffset));

  // Check whether the elements object is a number dictionary.
  // r3: elements map
  // r4: elements
  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
  __ cmp(r3, ip);
  __ b(ne, &slow);
  __ SmiUntag(r0, key);
  __ LoadFromNumberDictionary(&slow, r4, key, r0, r0, r3, r5);
  __ Ret();

  // Slow case, key and receiver still in r2 and r1.
  __ bind(&slow);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, r4,
                      r3);
  GenerateRuntimeGetProperty(masm);

  __ bind(&check_name);
  GenerateKeyNameCheck(masm, key, r0, r3, &index_name, &slow);

  GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3,
                                 Map::kHasNamedInterceptor, &slow);

  // If the receiver is a fast-case object, check the keyed lookup
  // cache. Otherwise probe the dictionary.
  __ ldr(r3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
  __ cmp(r4, ip);
  __ b(eq, &probe_dictionary);

  // Load the map of the receiver, compute the keyed lookup cache hash
  // based on 32 bits of the map pointer and the name hash.
  __ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ mov(r3, Operand(r0, ASR, KeyedLookupCache::kMapHashShift));
  __ ldr(r4, FieldMemOperand(key, Name::kHashFieldOffset));
  __ eor(r3, r3, Operand(r4, ASR, Name::kHashShift));
  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
  __ And(r3, r3, Operand(mask));

  // Load the key (consisting of map and unique name) from the cache and
  // check for match.
  Label load_in_object_property;
  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
  Label hit_on_nth_entry[kEntriesPerBucket];
  ExternalReference cache_keys =
      ExternalReference::keyed_lookup_cache_keys(isolate);

  __ mov(r4, Operand(cache_keys));
  __ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1));
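  // Each cache entry holds two pointers (map, name), hence the extra shift
  // by one when scaling the bucket index.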

  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
    Label try_next_entry;
    // Load map and move r4 to next entry.
    __ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex));
    __ cmp(r0, r5);
    __ b(ne, &try_next_entry);
    __ ldr(r5, MemOperand(r4, -kPointerSize));  // Load name.
    __ cmp(key, r5);
    __ b(eq, &hit_on_nth_entry[i]);
    __ bind(&try_next_entry);
  }

  // Last entry: Load map and move r4 to name.
  __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));
  __ cmp(r0, r5);
  __ b(ne, &slow);
  __ ldr(r5, MemOperand(r4));
  __ cmp(key, r5);
  __ b(ne, &slow);

  // Get field offset.
  // r0     : receiver's map
  // r3     : lookup cache index
  ExternalReference cache_field_offsets =
      ExternalReference::keyed_lookup_cache_field_offsets(isolate);

  // Hit on nth entry.
  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
    __ bind(&hit_on_nth_entry[i]);
    __ mov(r4, Operand(cache_field_offsets));
    if (i != 0) {
      __ add(r3, r3, Operand(i));
    }
    __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
    __ ldrb(r6, FieldMemOperand(r0, Map::kInObjectPropertiesOffset));
    __ sub(r5, r5, r6, SetCC);
    __ b(ge, &property_array_property);
    if (i != 0) {
      __ jmp(&load_in_object_property);
    }
  }

  // Load in-object property.
  __ bind(&load_in_object_property);
  __ ldrb(r6, FieldMemOperand(r0, Map::kInstanceSizeOffset));
  __ add(r6, r6, r5);  // Index from start of object.
  __ sub(receiver, receiver, Operand(kHeapObjectTag));  // Remove the heap tag.
  __ ldr(r0, MemOperand(receiver, r6, LSL, kPointerSizeLog2));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
                      r4, r3);
  __ Ret();

  // Load property array property.
  __ bind(&property_array_property);
  __ ldr(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ add(receiver, receiver, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ ldr(r0, MemOperand(receiver, r5, LSL, kPointerSizeLog2));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
                      r4, r3);
  __ Ret();

  // Do a quick inline probe of the receiver's dictionary, if it
  // exists.
  __ bind(&probe_dictionary);
  // r3: elements
  __ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, r0, &slow);
  // Load the property to r0.
  GenerateDictionaryLoad(masm, &slow, r3, key, r0, r5, r4);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, r4,
                      r3);
  __ Ret();

  __ bind(&index_name);
  __ IndexFromHash(r3, key);
  // Now jump to the place where smi keys are handled.
  __ jmp(&index_smi);
}


void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
  // Return address is in lr.
  Label miss;

  Register receiver = LoadDescriptor::ReceiverRegister();
  Register index = LoadDescriptor::NameRegister();
  Register scratch = r3;
  Register result = r0;
  DCHECK(!scratch.is(receiver) && !scratch.is(index));

  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          STRING_INDEX_IS_ARRAY_INDEX);
  char_at_generator.GenerateFast(masm);
  __ Ret();

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, call_helper);

  __ bind(&miss);
  GenerateMiss(masm);
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  // Push receiver, key and value for runtime call.
  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
          StoreDescriptor::ValueRegister());

  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


static void KeyedStoreGenerateGenericHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
    Register value, Register key, Register receiver, Register receiver_map,
    Register elements_map, Register elements) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;

  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  Register scratch_value = r4;
  Register address = r5;
  if (check_map == kCheckMap) {
    __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ cmp(elements_map,
           Operand(masm->isolate()->factory()->fixed_array_map()));
    __ b(ne, fast_double);
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed1;
  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ ldr(scratch_value,
         MemOperand::PointerAddressFromSmiKey(address, key, PreIndex));
  __ cmp(scratch_value, Operand(masm->isolate()->factory()->the_hole_value()));
  __ b(ne, &holecheck_passed1);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
                                      slow);

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);

  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ add(scratch_value, key, Operand(Smi::FromInt(1)));
    __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  // It's irrelevant whether array is smi-only or not when writing a smi.
  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ str(value, MemOperand::PointerAddressFromSmiKey(address, key));
  __ Ret();

  __ bind(&non_smi_value);
  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, scratch_value,
                             &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ add(scratch_value, key, Operand(Smi::FromInt(1)));
    __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(address, address, Operand::PointerOffsetFromSmiKey(key));
  __ str(value, MemOperand(address));
  // Update write barrier for the elements array address.
  __ mov(scratch_value, value);  // Preserve the value which is returned.
  __ RecordWrite(elements, address, scratch_value, kLRHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret();

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
    __ b(ne, slow);
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so
  // go to the runtime.
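  // The double hole is a NaN distinguished by its upper 32 bits, so it is
  // enough to load just the upper word of each slot; the
  // sizeof(kHoleNanLower32) term biases the base address accordingly. The
  // smi key shifted by kPointerSizeLog2 scales to index * 8, the size of a
  // double.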
  __ add(address, elements,
         Operand((FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)) -
                 kHeapObjectTag));
  __ ldr(scratch_value,
         MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex));
  __ cmp(scratch_value, Operand(kHoleNanUpper32));
  __ b(ne, &fast_double_without_map_check);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
                                      slow);

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key, elements, r3, d0,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ add(scratch_value, key, Operand(Smi::FromInt(1)));
    __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();

  __ bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset));
  __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
  __ b(ne, &non_double_value);

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(
      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, r4, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   receiver_map, mode, slow);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, r4, slow);
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, r4, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);
}


void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
                                   StrictMode strict_mode) {
  // ---------- S t a t e --------------
  //  -- r0     : value
  //  -- r1     : key
  //  -- r2     : receiver
  //  -- lr     : return address
  // -----------------------------------
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array;

  // Register usage.
  Register value = StoreDescriptor::ValueRegister();
  Register key = StoreDescriptor::NameRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  DCHECK(receiver.is(r1));
  DCHECK(key.is(r2));
  DCHECK(value.is(r0));
  Register receiver_map = r3;
  Register elements_map = r6;
  Register elements = r9;  // Elements array of the receiver.
  // r4 and r5 are used as general scratch registers.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &slow);
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map of the object.
  __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks and is not
  // observed. The generic stub does not perform map checks or handle
  // observed objects.
  __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
  __ b(ne, &slow);
  // Check if the object is a JS array or not.
  __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
  __ cmp(r4, Operand(JS_ARRAY_TYPE));
  __ b(eq, &array);
  // Check that the object is some kind of JSObject.
  __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
  __ b(lt, &slow);

  // Object case: Check key against length in the elements array.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmp(key, Operand(ip));
  __ b(lo, &fast_object);

  // Slow case, handle jump to runtime.
  __ bind(&slow);
  // Entry registers are intact.
  // r0: value.
  // r1: key.
  // r2: receiver.
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // Condition code from comparing key and array length is still available.
  __ b(ne, &slow);  // Only support writing to array[array.length].
  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmp(key, Operand(ip));
  __ b(hs, &slow);
  __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
  __ b(ne, &check_if_double_array);
  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ cmp(elements_map,
         Operand(masm->isolate()->factory()->fixed_double_array_map()));
  __ b(ne, &slow);
  __ jmp(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is the length is always a smi.
  __ bind(&array);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ cmp(key, Operand(ip));
  __ b(hs, &extra);

  KeyedStoreGenerateGenericHelper(
      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
      value, key, receiver, receiver_map, elements_map, elements);
  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
                                  &slow, kDontCheckMap, kIncrementLength, value,
                                  key, receiver, receiver_map, elements_map,
                                  elements);
}


void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  DCHECK(receiver.is(r1));
  DCHECK(name.is(r2));
  DCHECK(StoreDescriptor::ValueRegister().is(r0));

  // Get the receiver from the stack and probe the stub cache.
  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::STORE_IC));

  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
                                               name, r3, r4, r5, r6);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void StoreIC::GenerateMiss(MacroAssembler* masm) {
  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
          StoreDescriptor::ValueRegister());

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateNormal(MacroAssembler* masm) {
  Label miss;
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  Register value = StoreDescriptor::ValueRegister();
  Register dictionary = r3;
  DCHECK(receiver.is(r1));
  DCHECK(name.is(r2));
  DCHECK(value.is(r0));

  __ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  GenerateDictionaryStore(masm, &miss, dictionary, name, value, r4, r5);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1, r4, r5);
  __ Ret();

  __ bind(&miss);
  __ IncrementCounter(counters->store_normal_miss(), 1, r4, r5);
  GenerateMiss(masm);
}


#undef __

Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address cmp_instruction_address =
      Assembler::return_address_from_call_start(address);

  // If the instruction following the call is not a cmp rx, #yyy, nothing
  // was inlined.
  Instr instr = Assembler::instr_at(cmp_instruction_address);
  return Assembler::IsCmpImmediate(instr);
}


void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
  Address cmp_instruction_address =
      Assembler::return_address_from_call_start(address);

  // If the instruction following the call is not a cmp rx, #yyy, nothing
  // was inlined.
  Instr instr = Assembler::instr_at(cmp_instruction_address);
  if (!Assembler::IsCmpImmediate(instr)) {
    return;
  }

  // The delta to the start of the map check instruction and the
  // condition code used at the patched jump.
  int delta = Assembler::GetCmpImmediateRawImmediate(instr);
  delta += Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask;
  // If the delta is 0 the instruction is cmp r0, #0 which also signals that
  // nothing was inlined.
  if (delta == 0) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[ patching ic at %p, cmp=%p, delta=%d\n", address,
           cmp_instruction_address, delta);
  }

  Address patch_address =
      cmp_instruction_address - delta * Instruction::kInstrSize;
  Instr instr_at_patch = Assembler::instr_at(patch_address);
  Instr branch_instr =
      Assembler::instr_at(patch_address + Instruction::kInstrSize);
  // This is patching a conditional "jump if not smi/jump if smi" site.
  // Enabling by changing from
  //   cmp rx, rx
  //   b eq/ne, <target>
  // to
  //   tst rx, #kSmiTagMask
  //   b ne/eq, <target>
  // and vice-versa to be disabled again.
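  // The CodePatcher below covers two instructions: the cmp/tst at
  // patch_address and the conditional branch that immediately follows it.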
  CodePatcher patcher(patch_address, 2);
  Register reg = Assembler::GetRn(instr_at_patch);
  if (check == ENABLE_INLINED_SMI_CHECK) {
    DCHECK(Assembler::IsCmpRegister(instr_at_patch));
    DCHECK_EQ(Assembler::GetRn(instr_at_patch).code(),
              Assembler::GetRm(instr_at_patch).code());
    patcher.masm()->tst(reg, Operand(kSmiTagMask));
  } else {
    DCHECK(check == DISABLE_INLINED_SMI_CHECK);
    DCHECK(Assembler::IsTstImmediate(instr_at_patch));
    patcher.masm()->cmp(reg, reg);
  }
  DCHECK(Assembler::IsBranch(branch_instr));
  if (Assembler::GetCondition(branch_instr) == eq) {
    patcher.EmitCondition(ne);
  } else {
    DCHECK(Assembler::GetCondition(branch_instr) == ne);
    patcher.EmitCondition(eq);
  }
}
}
}  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM