ic-mips.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 
6 #include "src/v8.h"
7 
8 #if V8_TARGET_ARCH_MIPS
9 
10 #include "src/codegen.h"
11 #include "src/ic/ic.h"
12 #include "src/ic/ic-compiler.h"
13 #include "src/ic/stub-cache.h"
14 
15 namespace v8 {
16 namespace internal {
17 
18 
19 // ----------------------------------------------------------------------------
20 // Static IC stub generators.
21 //
22 
23 #define __ ACCESS_MASM(masm)
24 
25 
26 static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
27  Label* global_object) {
28  // Register usage:
29  // type: holds the receiver instance type on entry.
30  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
31  __ Branch(global_object, eq, type, Operand(JS_BUILTINS_OBJECT_TYPE));
32  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
33 }
34 
35 
36 // Helper function used from LoadIC GenerateNormal.
37 //
38 // elements: Property dictionary. It is not clobbered if a jump to the miss
39 // label is done.
40 // name: Property name. It is not clobbered if a jump to the miss label is
41 // done
42 // result: Register for the result. It is only updated if a jump to the miss
43 // label is not done. Can be the same as elements or name clobbering
44 // one of these in the case of not jumping to the miss label.
45 // The two scratch registers need to be different from elements, name and
46 // result.
47 // The generated code assumes that the receiver has slow properties,
48 // is not a global object and does not have interceptors.
49 // The address returned from GenerateStringDictionaryProbes() in scratch2
50 // is used.
51 static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
52  Register elements, Register name,
53  Register result, Register scratch1,
54  Register scratch2) {
55  // Main use of the scratch registers.
56  // scratch1: Used as temporary and to hold the capacity of the property
57  // dictionary.
58  // scratch2: Used as temporary.
59  Label done;
60 
61  // Probe the dictionary.
62  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
63  name, scratch1, scratch2);
64 
65  // If probing finds an entry check that the value is a normal
66  // property.
67  __ bind(&done); // scratch2 == elements + 4 * index.
68  const int kElementsStartOffset =
69      NameDictionary::kHeaderSize +
70      NameDictionary::kElementsStartIndex * kPointerSize;
71  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
72  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
73  __ And(at, scratch1,
74  Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
75  __ Branch(miss, ne, at, Operand(zero_reg));
76 
77  // Get the value at the masked, scaled index and return.
78  __ lw(result,
79  FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
80 }
81 
82 
83 // Helper function used from StoreIC::GenerateNormal.
84 //
85 // elements: Property dictionary. It is not clobbered if a jump to the miss
86 // label is done.
87 // name: Property name. It is not clobbered if a jump to the miss label is
88 // done
89 // value: The value to store.
90 // The two scratch registers need to be different from elements, name and
91 // result.
92 // The generated code assumes that the receiver has slow properties,
93 // is not a global object and does not have interceptors.
94 // The address returned from GenerateStringDictionaryProbes() in scratch2
95 // is used.
96 static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
97  Register elements, Register name,
98  Register value, Register scratch1,
99  Register scratch2) {
100  // Main use of the scratch registers.
101  // scratch1: Used as temporary and to hold the capacity of the property
102  // dictionary.
103  // scratch2: Used as temporary.
104  Label done;
105 
106  // Probe the dictionary.
107  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
108  name, scratch1, scratch2);
109 
110  // If probing finds an entry in the dictionary check that the value
111  // is a normal property that is not read only.
112  __ bind(&done); // scratch2 == elements + 4 * index.
113  const int kElementsStartOffset =
114      NameDictionary::kHeaderSize +
115      NameDictionary::kElementsStartIndex * kPointerSize;
116  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
117  const int kTypeAndReadOnlyMask =
118  (PropertyDetails::TypeField::kMask |
119  PropertyDetails::AttributesField::encode(READ_ONLY))
120  << kSmiTagSize;
121  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
122  __ And(at, scratch1, Operand(kTypeAndReadOnlyMask));
123  __ Branch(miss, ne, at, Operand(zero_reg));
124 
125  // Store the value at the masked, scaled index and return.
126  const int kValueOffset = kElementsStartOffset + kPointerSize;
127  __ Addu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
128  __ sw(value, MemOperand(scratch2));
129 
130  // Update the write barrier. Make sure not to clobber the value.
131  __ mov(scratch1, value);
132  __ RecordWrite(elements, scratch2, scratch1, kRAHasNotBeenSaved,
133                 kDontSaveFPRegs);
134 }
135 
136 
137 // Checks the receiver for special cases (value type, slow case bits).
138 // Falls through for regular JS object.
139 static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
140  Register receiver, Register map,
141  Register scratch,
142  int interceptor_bit, Label* slow) {
143  // Check that the object isn't a smi.
144  __ JumpIfSmi(receiver, slow);
145  // Get the map of the receiver.
146  __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
147  // Check bit field.
148  __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
149  __ And(at, scratch,
150  Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
151  __ Branch(slow, ne, at, Operand(zero_reg));
152  // Check that the object is some kind of JS object EXCEPT JS Value type.
153  // In the case that the object is a value-wrapper object,
154  // we enter the runtime system to make sure that indexing into string
155  // objects work as intended.
156  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
157  __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
158  __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
159 }
160 
161 
162 // Loads an indexed element from a fast case array.
163 // If not_fast_array is NULL, doesn't perform the elements map check.
164 static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
165  Register key, Register elements,
166  Register scratch1, Register scratch2,
167  Register result, Label* not_fast_array,
168  Label* out_of_range) {
169  // Register use:
170  //
171  // receiver - holds the receiver on entry.
172  // Unchanged unless 'result' is the same register.
173  //
174  // key - holds the smi key on entry.
175  // Unchanged unless 'result' is the same register.
176  //
177  // elements - holds the elements of the receiver on exit.
178  //
179  // result - holds the result on exit if the load succeeded.
180  // Allowed to be the same as 'receiver' or 'key'.
181  // Unchanged on bailout so 'receiver' and 'key' can be safely
182  // used by further computation.
183  //
184  // Scratch registers:
185  //
186  // scratch1 - used to hold elements map and elements length.
187  // Holds the elements map if not_fast_array branch is taken.
188  //
189  // scratch2 - used to hold the loaded value.
190 
191  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
192  if (not_fast_array != NULL) {
193  // Check that the object is in fast mode (not dictionary).
194  __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
195  __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
196  __ Branch(not_fast_array, ne, scratch1, Operand(at));
197  } else {
198  __ AssertFastElements(elements);
199  }
200 
201  // Check that the key (index) is within bounds.
202  __ lw(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
203  __ Branch(out_of_range, hs, key, Operand(scratch1));
204 
205  // Fast case: Do the load.
206  __ Addu(scratch1, elements,
207          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
208  // The key is a smi.
209  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
210  __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
211  __ addu(at, at, scratch1);
212  __ lw(scratch2, MemOperand(at));
213 
214  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
215  // In case the loaded value is the_hole we have to consult GetProperty
216  // to ensure the prototype chain is searched.
217  __ Branch(out_of_range, eq, scratch2, Operand(at));
218  __ mov(result, scratch2);
219 }
220 
221 
222 // Checks whether a key is an array index string or a unique name.
223 // Falls through if a key is a unique name.
224 static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
225  Register map, Register hash,
226  Label* index_string, Label* not_unique) {
227  // The key is not a smi.
228  Label unique;
229  // Is it a name?
230  __ GetObjectType(key, map, hash);
231  __ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE));
232  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
233  __ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE));
234 
235  // Is the string an array index, with cached numeric value?
236  __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
237  __ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask));
238  __ Branch(index_string, eq, at, Operand(zero_reg));
239 
240  // Is the string internalized? We know it's a string, so a single
241  // bit test is enough.
242  // map: key map
243  __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
244  STATIC_ASSERT(kInternalizedTag == 0);
245  __ And(at, hash, Operand(kIsNotInternalizedMask));
246  __ Branch(not_unique, ne, at, Operand(zero_reg));
247 
248  __ bind(&unique);
249 }
250 
251 
252 void LoadIC::GenerateNormal(MacroAssembler* masm) {
253  Register dictionary = a0;
254  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
255  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
256 
257  Label slow;
258 
259  __ lw(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
260                                    JSObject::kPropertiesOffset));
261  GenerateDictionaryLoad(masm, &slow, dictionary,
262  LoadDescriptor::NameRegister(), v0, a3, t0);
263  __ Ret();
264 
265  // Dictionary load failed, go slow (but don't miss).
266  __ bind(&slow);
267  GenerateRuntimeGetProperty(masm);
268 }
269 
270 
271 // A register that isn't one of the parameters to the load ic.
272 static const Register LoadIC_TempRegister() { return a3; }
273 
274 
275 void LoadIC::GenerateMiss(MacroAssembler* masm) {
276  // The return address is in ra.
277  Isolate* isolate = masm->isolate();
278 
279  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);
280 
281  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
282  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
283 
284  // Perform tail call to the entry.
285  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
286  __ TailCallExternalReference(ref, 2, 1);
287 }
288 
289 
290 void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
291  // The return address is in ra.
292 
293  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
294  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
295 
296  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
297 }
298 
299 
300 static MemOperand GenerateMappedArgumentsLookup(
301  MacroAssembler* masm, Register object, Register key, Register scratch1,
302  Register scratch2, Register scratch3, Label* unmapped_case,
303  Label* slow_case) {
304  Heap* heap = masm->isolate()->heap();
305 
306  // Check that the receiver is a JSObject. Because of the map check
307  // later, we do not need to check for interceptors or whether it
308  // requires access checks.
309  __ JumpIfSmi(object, slow_case);
310  // Check that the object is some kind of JSObject.
311  __ GetObjectType(object, scratch1, scratch2);
312  __ Branch(slow_case, lt, scratch2, Operand(FIRST_JS_RECEIVER_TYPE));
313 
314  // Check that the key is a positive smi.
315  __ And(scratch1, key, Operand(0x80000001));
316  __ Branch(slow_case, ne, scratch1, Operand(zero_reg));
317 
318  // Load the elements into scratch1 and check its map.
319  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
320  __ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
321  __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
322  // Check if element is in the range of mapped arguments. If not, jump
323  // to the unmapped lookup with the parameter map in scratch1.
324  __ lw(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
325  __ Subu(scratch2, scratch2, Operand(Smi::FromInt(2)));
326  __ Branch(unmapped_case, Ugreater_equal, key, Operand(scratch2));
327 
328  // Load element index and check whether it is the hole.
329  const int kOffset =
330      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
331 
332  __ li(scratch3, Operand(kPointerSize >> 1));
333  __ Mul(scratch3, key, scratch3);
334  __ Addu(scratch3, scratch3, Operand(kOffset));
335 
336  __ Addu(scratch2, scratch1, scratch3);
337  __ lw(scratch2, MemOperand(scratch2));
338  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
339  __ Branch(unmapped_case, eq, scratch2, Operand(scratch3));
340 
341  // Load value from context and return it. We can reuse scratch1 because
342  // we do not jump to the unmapped lookup (which requires the parameter
343  // map in scratch1).
344  __ lw(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
345  __ li(scratch3, Operand(kPointerSize >> 1));
346  __ Mul(scratch3, scratch2, scratch3);
347  __ Addu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
348  __ Addu(scratch2, scratch1, scratch3);
349  return MemOperand(scratch2);
350 }
351 
352 
353 static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
354  Register key,
355  Register parameter_map,
356  Register scratch,
357  Label* slow_case) {
358  // Element is in arguments backing store, which is referenced by the
359  // second element of the parameter_map. The parameter_map register
360  // must be loaded with the parameter map of the arguments object and is
361  // overwritten.
362  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
363  Register backing_store = parameter_map;
364  __ lw(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
365  __ CheckMap(backing_store, scratch, Heap::kFixedArrayMapRootIndex, slow_case,
366              DONT_DO_SMI_CHECK);
367  __ lw(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
368  __ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
369  __ li(scratch, Operand(kPointerSize >> 1));
370  __ Mul(scratch, key, scratch);
371  __ Addu(scratch, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
372  __ Addu(scratch, backing_store, scratch);
373  return MemOperand(scratch);
374 }
375 
376 
377 void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
378  Register receiver = StoreDescriptor::ReceiverRegister();
379  Register key = StoreDescriptor::NameRegister();
380  Register value = StoreDescriptor::ValueRegister();
381  DCHECK(value.is(a0));
382 
383  Label slow, notin;
384  // Store address is returned in register (of MemOperand) mapped_location.
385  MemOperand mapped_location = GenerateMappedArgumentsLookup(
386  masm, receiver, key, a3, t0, t1, &notin, &slow);
387  __ sw(value, mapped_location);
388  __ mov(t5, value);
389  DCHECK_EQ(mapped_location.offset(), 0);
390  __ RecordWrite(a3, mapped_location.rm(), t5, kRAHasNotBeenSaved,
391                 kDontSaveFPRegs);
392  __ Ret(USE_DELAY_SLOT);
393  __ mov(v0, value); // (In delay slot) return the value stored in v0.
394  __ bind(&notin);
395  // The unmapped lookup expects that the parameter map is in a3.
396  // Store address is returned in register (of MemOperand) unmapped_location.
397  MemOperand unmapped_location =
398  GenerateUnmappedArgumentsLookup(masm, key, a3, t0, &slow);
399  __ sw(value, unmapped_location);
400  __ mov(t5, value);
401  DCHECK_EQ(unmapped_location.offset(), 0);
402  __ RecordWrite(a3, unmapped_location.rm(), t5, kRAHasNotBeenSaved,
403                 kDontSaveFPRegs);
404  __ Ret(USE_DELAY_SLOT);
405  __ mov(v0, a0); // (In delay slot) return the value stored in v0.
406  __ bind(&slow);
407  GenerateMiss(masm);
408 }
409 
410 
411 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
412  // The return address is in ra.
413  Isolate* isolate = masm->isolate();
414 
415  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);
416 
417  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
418 
419  // Perform tail call to the entry.
420  ExternalReference ref =
421  ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
422 
423  __ TailCallExternalReference(ref, 2, 1);
424 }
425 
426 
427 void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
428  // The return address is in ra.
429 
430  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
431 
432  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
433 }
434 
435 
436 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
437  // The return address is in ra.
438  Label slow, check_name, index_smi, index_name, property_array_property;
439  Label probe_dictionary, check_number_dictionary;
440 
441  Register key = LoadDescriptor::NameRegister();
442  Register receiver = LoadDescriptor::ReceiverRegister();
443  DCHECK(key.is(a2));
444  DCHECK(receiver.is(a1));
445 
446  Isolate* isolate = masm->isolate();
447 
448  // Check that the key is a smi.
449  __ JumpIfNotSmi(key, &check_name);
450  __ bind(&index_smi);
451  // Now the key is known to be a smi. This place is also jumped to from below
452  // where a numeric string is converted to a smi.
453 
454  GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
455                                 Map::kHasIndexedInterceptor, &slow);
456 
457  // Check the receiver's map to see if it has fast elements.
458  __ CheckFastElements(a0, a3, &check_number_dictionary);
459 
460  GenerateFastArrayLoad(masm, receiver, key, a0, a3, t0, v0, NULL, &slow);
461  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, t0, a3);
462  __ Ret();
463 
464  __ bind(&check_number_dictionary);
465  __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
466  __ lw(a3, FieldMemOperand(t0, JSObject::kMapOffset));
467 
468  // Check whether the elements is a number dictionary.
469  // a3: elements map
470  // t0: elements
471  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
472  __ Branch(&slow, ne, a3, Operand(at));
473  __ sra(a0, key, kSmiTagSize);
474  __ LoadFromNumberDictionary(&slow, t0, key, v0, a0, a3, t1);
475  __ Ret();
476 
477  // Slow case, key and receiver still in a2 and a1.
478  __ bind(&slow);
479  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, t0,
480  a3);
481  GenerateRuntimeGetProperty(masm);
482 
483  __ bind(&check_name);
484  GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);
485 
486  GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
487                                 Map::kHasNamedInterceptor, &slow);
488 
489 
490  // If the receiver is a fast-case object, check the keyed lookup
491  // cache. Otherwise probe the dictionary.
492  __ lw(a3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
493  __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
494  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
495  __ Branch(&probe_dictionary, eq, t0, Operand(at));
496 
497  // Load the map of the receiver, compute the keyed lookup cache hash
498  // based on 32 bits of the map pointer and the name hash.
499  __ lw(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
500  __ sra(a3, a0, KeyedLookupCache::kMapHashShift);
501  __ lw(t0, FieldMemOperand(key, Name::kHashFieldOffset));
502  __ sra(at, t0, Name::kHashShift);
503  __ xor_(a3, a3, at);
504  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
505  __ And(a3, a3, Operand(mask));
506 
507  // Load the key (consisting of map and unique name) from the cache and
508  // check for match.
509  Label load_in_object_property;
510  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
511  Label hit_on_nth_entry[kEntriesPerBucket];
512  ExternalReference cache_keys =
513  ExternalReference::keyed_lookup_cache_keys(isolate);
514  __ li(t0, Operand(cache_keys));
515  __ sll(at, a3, kPointerSizeLog2 + 1);
516  __ addu(t0, t0, at);
517 
518  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
519  Label try_next_entry;
520  __ lw(t1, MemOperand(t0, kPointerSize * i * 2));
521  __ Branch(&try_next_entry, ne, a0, Operand(t1));
522  __ lw(t1, MemOperand(t0, kPointerSize * (i * 2 + 1)));
523  __ Branch(&hit_on_nth_entry[i], eq, key, Operand(t1));
524  __ bind(&try_next_entry);
525  }
526 
527  __ lw(t1, MemOperand(t0, kPointerSize * (kEntriesPerBucket - 1) * 2));
528  __ Branch(&slow, ne, a0, Operand(t1));
529  __ lw(t1, MemOperand(t0, kPointerSize * ((kEntriesPerBucket - 1) * 2 + 1)));
530  __ Branch(&slow, ne, key, Operand(t1));
531 
532  // Get field offset.
533  // a0 : receiver's map
534  // a3 : lookup cache index
535  ExternalReference cache_field_offsets =
536  ExternalReference::keyed_lookup_cache_field_offsets(isolate);
537 
538  // Hit on nth entry.
539  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
540  __ bind(&hit_on_nth_entry[i]);
541  __ li(t0, Operand(cache_field_offsets));
542  __ sll(at, a3, kPointerSizeLog2);
543  __ addu(at, t0, at);
544  __ lw(t1, MemOperand(at, kPointerSize * i));
545  __ lbu(t2, FieldMemOperand(a0, Map::kInObjectPropertiesOffset));
546  __ Subu(t1, t1, t2);
547  __ Branch(&property_array_property, ge, t1, Operand(zero_reg));
548  if (i != 0) {
549  __ Branch(&load_in_object_property);
550  }
551  }
552 
553  // Load in-object property.
554  __ bind(&load_in_object_property);
555  __ lbu(t2, FieldMemOperand(a0, Map::kInstanceSizeOffset));
556  __ addu(t2, t2, t1); // Index from start of object.
557  __ Subu(receiver, receiver, Operand(kHeapObjectTag)); // Remove the heap tag.
558  __ sll(at, t2, kPointerSizeLog2);
559  __ addu(at, receiver, at);
560  __ lw(v0, MemOperand(at));
561  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
562  t0, a3);
563  __ Ret();
564 
565  // Load property array property.
566  __ bind(&property_array_property);
567  __ lw(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
568  __ Addu(receiver, receiver, FixedArray::kHeaderSize - kHeapObjectTag);
569  __ sll(v0, t1, kPointerSizeLog2);
570  __ Addu(v0, v0, receiver);
571  __ lw(v0, MemOperand(v0));
572  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
573  t0, a3);
574  __ Ret();
575 
576 
577  // Do a quick inline probe of the receiver's dictionary, if it
578  // exists.
579  __ bind(&probe_dictionary);
580  // a3: elements
581  __ lw(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
582  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
583  GenerateGlobalInstanceTypeCheck(masm, a0, &slow);
584  // Load the property to v0.
585  GenerateDictionaryLoad(masm, &slow, a3, key, v0, t1, t0);
586  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, t0,
587  a3);
588  __ Ret();
589 
590  __ bind(&index_name);
591  __ IndexFromHash(a3, key);
592  // Now jump to the place where smi keys are handled.
593  __ Branch(&index_smi);
594 }
595 
596 
597 void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
598  // Return address is in ra.
599  Label miss;
600 
601  Register receiver = LoadDescriptor::ReceiverRegister();
602  Register index = LoadDescriptor::NameRegister();
603  Register scratch = a3;
604  Register result = v0;
605  DCHECK(!scratch.is(receiver) && !scratch.is(index));
606 
607  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
608  &miss, // When not a string.
609  &miss, // When not a number.
610  &miss, // When index out of range.
611  STRING_INDEX_IS_ARRAY_INDEX);
612  char_at_generator.GenerateFast(masm);
613  __ Ret();
614 
615  StubRuntimeCallHelper call_helper;
616  char_at_generator.GenerateSlow(masm, call_helper);
617 
618  __ bind(&miss);
619  GenerateMiss(masm);
620 }
621 
622 
623 static void KeyedStoreGenerateGenericHelper(
624  MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
625  KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
626  Register value, Register key, Register receiver, Register receiver_map,
627  Register elements_map, Register elements) {
628  Label transition_smi_elements;
629  Label finish_object_store, non_double_value, transition_double_elements;
630  Label fast_double_without_map_check;
631 
632  // Fast case: Do the store, could be either Object or double.
633  __ bind(fast_object);
634  Register scratch_value = t0;
635  Register address = t1;
636  if (check_map == kCheckMap) {
637  __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
638  __ Branch(fast_double, ne, elements_map,
639  Operand(masm->isolate()->factory()->fixed_array_map()));
640  }
641 
642  // HOLECHECK: guards "A[i] = V"
643  // We have to go to the runtime if the current value is the hole because
644  // there may be a callback on the element.
645  Label holecheck_passed1;
646  __ Addu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
647  __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
648  __ addu(address, address, at);
649  __ lw(scratch_value, MemOperand(address));
650  __ Branch(&holecheck_passed1, ne, scratch_value,
651  Operand(masm->isolate()->factory()->the_hole_value()));
652  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
653  slow);
654 
655  __ bind(&holecheck_passed1);
656 
657  // Smi stores don't require further checks.
658  Label non_smi_value;
659  __ JumpIfNotSmi(value, &non_smi_value);
660 
661  if (increment_length == kIncrementLength) {
662  // Add 1 to receiver->length.
663  __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
664  __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
665  }
666  // It's irrelevant whether array is smi-only or not when writing a smi.
667  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
668  __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
669  __ Addu(address, address, scratch_value);
670  __ sw(value, MemOperand(address));
671  __ Ret();
672 
673  __ bind(&non_smi_value);
674  // Escape to elements kind transition case.
675  __ CheckFastObjectElements(receiver_map, scratch_value,
676  &transition_smi_elements);
677 
678  // Fast elements array, store the value to the elements backing store.
679  __ bind(&finish_object_store);
680  if (increment_length == kIncrementLength) {
681  // Add 1 to receiver->length.
682  __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
683  __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
684  }
685  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
686  __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
687  __ Addu(address, address, scratch_value);
688  __ sw(value, MemOperand(address));
689  // Update write barrier for the elements array address.
690  __ mov(scratch_value, value); // Preserve the value which is returned.
691  __ RecordWrite(elements, address, scratch_value, kRAHasNotBeenSaved,
692                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
693  __ Ret();
694 
695  __ bind(fast_double);
696  if (check_map == kCheckMap) {
697  // Check for fast double array case. If this fails, call through to the
698  // runtime.
699  __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
700  __ Branch(slow, ne, elements_map, Operand(at));
701  }
702 
703  // HOLECHECK: guards "A[i] double hole?"
704  // We have to see if the double version of the hole is present. If so
705  // go to the runtime.
706  __ Addu(address, elements, Operand(FixedDoubleArray::kHeaderSize +
707  kHoleNanUpper32Offset - kHeapObjectTag));
708  __ sll(at, key, kPointerSizeLog2);
709  __ addu(address, address, at);
710  __ lw(scratch_value, MemOperand(address));
711  __ Branch(&fast_double_without_map_check, ne, scratch_value,
712  Operand(kHoleNanUpper32));
713  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
714  slow);
715 
716  __ bind(&fast_double_without_map_check);
717  __ StoreNumberToDoubleElements(value, key,
718  elements, // Overwritten.
719  a3, // Scratch regs...
720  t0, t1, &transition_double_elements);
721  if (increment_length == kIncrementLength) {
722  // Add 1 to receiver->length.
723  __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
724  __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
725  }
726  __ Ret();
727 
728  __ bind(&transition_smi_elements);
729  // Transition the array appropriately depending on the value type.
730  __ lw(t0, FieldMemOperand(value, HeapObject::kMapOffset));
731  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
732  __ Branch(&non_double_value, ne, t0, Operand(at));
733 
734  // Value is a double. Transition FAST_SMI_ELEMENTS ->
735  // FAST_DOUBLE_ELEMENTS and complete the store.
736  __ LoadTransitionedArrayMapConditional(
737  FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, t0, slow);
738  AllocationSiteMode mode =
739      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
740  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
741  receiver_map, mode, slow);
742  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
743  __ jmp(&fast_double_without_map_check);
744 
745  __ bind(&non_double_value);
746  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
747  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
748  receiver_map, t0, slow);
749  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
750  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
751  masm, receiver, key, value, receiver_map, mode, slow);
752  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
753  __ jmp(&finish_object_store);
754 
755  __ bind(&transition_double_elements);
756  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
757  // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
758  // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
759  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
760  receiver_map, t0, slow);
761  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
762  ElementsTransitionGenerator::GenerateDoubleToObject(
763  masm, receiver, key, value, receiver_map, mode, slow);
764  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
765  __ jmp(&finish_object_store);
766 }
767 
768 
769 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
770  StrictMode strict_mode) {
771  // ---------- S t a t e --------------
772  // -- a0 : value
773  // -- a1 : key
774  // -- a2 : receiver
775  // -- ra : return address
776  // -----------------------------------
777  Label slow, fast_object, fast_object_grow;
778  Label fast_double, fast_double_grow;
779  Label array, extra, check_if_double_array;
780 
781  // Register usage.
782  Register value = StoreDescriptor::ValueRegister();
783  Register key = StoreDescriptor::NameRegister();
784  Register receiver = StoreDescriptor::ReceiverRegister();
785  DCHECK(value.is(a0));
786  Register receiver_map = a3;
787  Register elements_map = t2;
788  Register elements = t3; // Elements array of the receiver.
789  // t0 and t1 are used as general scratch registers.
790 
791  // Check that the key is a smi.
792  __ JumpIfNotSmi(key, &slow);
793  // Check that the object isn't a smi.
794  __ JumpIfSmi(receiver, &slow);
795  // Get the map of the object.
796  __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
797  // Check that the receiver does not require access checks and is not observed.
798  // The generic stub does not perform map checks or handle observed objects.
799  __ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
800  __ And(t0, t0,
801  Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
802  __ Branch(&slow, ne, t0, Operand(zero_reg));
803  // Check if the object is a JS array or not.
804  __ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
805  __ Branch(&array, eq, t0, Operand(JS_ARRAY_TYPE));
806  // Check that the object is some kind of JSObject.
807  __ Branch(&slow, lt, t0, Operand(FIRST_JS_OBJECT_TYPE));
808 
809  // Object case: Check key against length in the elements array.
810  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
811  // Check array bounds. Both the key and the length of FixedArray are smis.
812  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
813  __ Branch(&fast_object, lo, key, Operand(t0));
814 
815  // Slow case, handle jump to runtime.
816  __ bind(&slow);
817  // Entry registers are intact.
818  // a0: value.
819  // a1: key.
820  // a2: receiver.
821  PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
822 
823  // Extra capacity case: Check if there is extra capacity to
824  // perform the store and update the length. Used for adding one
825  // element to the array by writing to array[array.length].
826  __ bind(&extra);
827  // Condition code from comparing key and array length is still available.
828  // Only support writing to array[array.length].
829  __ Branch(&slow, ne, key, Operand(t0));
830  // Check for room in the elements backing store.
831  // Both the key and the length of FixedArray are smis.
832  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
833  __ Branch(&slow, hs, key, Operand(t0));
834  __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
835  __ Branch(&check_if_double_array, ne, elements_map,
836  Heap::kFixedArrayMapRootIndex);
837 
838  __ jmp(&fast_object_grow);
839 
840  __ bind(&check_if_double_array);
841  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
842  __ jmp(&fast_double_grow);
843 
844  // Array case: Get the length and the elements array from the JS
845  // array. Check that the array is in fast mode (and writable); if it
846  // is the length is always a smi.
847  __ bind(&array);
848  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
849 
850  // Check the key against the length in the array.
851  __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
852  __ Branch(&extra, hs, key, Operand(t0));
853 
854  KeyedStoreGenerateGenericHelper(
855  masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
856  value, key, receiver, receiver_map, elements_map, elements);
857  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
858  &slow, kDontCheckMap, kIncrementLength, value,
859  key, receiver, receiver_map, elements_map,
860  elements);
861 }
862 
863 
864 void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
865  // Push receiver, key and value for runtime call.
866  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
867          StoreDescriptor::ValueRegister());
868 
869  ExternalReference ref =
870  ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
871  __ TailCallExternalReference(ref, 3, 1);
872 }
873 
874 
875 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
876  Register receiver = StoreDescriptor::ReceiverRegister();
877  Register name = StoreDescriptor::NameRegister();
878  DCHECK(receiver.is(a1));
879  DCHECK(name.is(a2));
880  DCHECK(StoreDescriptor::ValueRegister().is(a0));
881 
882  // Get the receiver from the stack and probe the stub cache.
883  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
884      Code::ComputeHandlerFlags(Code::STORE_IC));
885  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
886  name, a3, t0, t1, t2);
887 
888  // Cache miss: Jump to runtime.
889  GenerateMiss(masm);
890 }
891 
892 
893 void StoreIC::GenerateMiss(MacroAssembler* masm) {
894  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
895          StoreDescriptor::ValueRegister());
896  // Perform tail call to the entry.
897  ExternalReference ref =
898  ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
899  __ TailCallExternalReference(ref, 3, 1);
900 }
901 
902 
903 void StoreIC::GenerateNormal(MacroAssembler* masm) {
904  Label miss;
905  Register receiver = StoreDescriptor::ReceiverRegister();
906  Register name = StoreDescriptor::NameRegister();
907  Register value = StoreDescriptor::ValueRegister();
908  Register dictionary = a3;
909  DCHECK(receiver.is(a1));
910  DCHECK(name.is(a2));
911  DCHECK(value.is(a0));
912 
913  __ lw(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
914 
915  GenerateDictionaryStore(masm, &miss, dictionary, name, value, t0, t1);
916  Counters* counters = masm->isolate()->counters();
917  __ IncrementCounter(counters->store_normal_hit(), 1, t0, t1);
918  __ Ret();
919 
920  __ bind(&miss);
921  __ IncrementCounter(counters->store_normal_miss(), 1, t0, t1);
922  GenerateMiss(masm);
923 }
924 
925 
926 #undef __
927 
928 
929 Condition CompareIC::ComputeCondition(Token::Value op) {
930  switch (op) {
931  case Token::EQ_STRICT:
932  case Token::EQ:
933  return eq;
934  case Token::LT:
935  return lt;
936  case Token::GT:
937  return gt;
938  case Token::LTE:
939  return le;
940  case Token::GTE:
941  return ge;
942  default:
943  UNREACHABLE();
944  return kNoCondition;
945  }
946 }
947 
948 
949 bool CompareIC::HasInlinedSmiCode(Address address) {
950  // The address of the instruction following the call.
951  Address andi_instruction_address =
952      address + Assembler::kCallTargetAddressOffset;
953 
954  // If the instruction following the call is not a andi at, rx, #yyy, nothing
955  // was inlined.
956  Instr instr = Assembler::instr_at(andi_instruction_address);
957  return Assembler::IsAndImmediate(instr) &&
958  Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
959 }
960 
961 
962 void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
963  Address andi_instruction_address =
964      address + Assembler::kCallTargetAddressOffset;
965 
966  // If the instruction following the call is not a andi at, rx, #yyy, nothing
967  // was inlined.
968  Instr instr = Assembler::instr_at(andi_instruction_address);
969  if (!(Assembler::IsAndImmediate(instr) &&
970  Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()))) {
971  return;
972  }
973 
974  // The delta to the start of the map check instruction and the
975  // condition code uses at the patched jump.
976  int delta = Assembler::GetImmediate16(instr);
977  delta += Assembler::GetRs(instr) * kImm16Mask;
978  // If the delta is 0 the instruction is andi at, zero_reg, #0 which also
979  // signals that nothing was inlined.
980  if (delta == 0) {
981  return;
982  }
983 
984  if (FLAG_trace_ic) {
985  PrintF("[ patching ic at %p, andi=%p, delta=%d\n", address,
986  andi_instruction_address, delta);
987  }
988 
989  Address patch_address =
990  andi_instruction_address - delta * Instruction::kInstrSize;
991  Instr instr_at_patch = Assembler::instr_at(patch_address);
992  Instr branch_instr =
993      Assembler::instr_at(patch_address + Instruction::kInstrSize);
994  // This is patching a conditional "jump if not smi/jump if smi" site.
995  // Enabling by changing from
996  // andi at, rx, 0
997  // Branch <target>, eq, at, Operand(zero_reg)
998  // to:
999  // andi at, rx, #kSmiTagMask
1000  // Branch <target>, ne, at, Operand(zero_reg)
1001  // and vice-versa to be disabled again.
1002  CodePatcher patcher(patch_address, 2);
1003  Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
1004  if (check == ENABLE_INLINED_SMI_CHECK) {
1005  DCHECK(Assembler::IsAndImmediate(instr_at_patch));
1006  DCHECK_EQ(0, Assembler::GetImmediate16(instr_at_patch));
1007  patcher.masm()->andi(at, reg, kSmiTagMask);
1008  } else {
1009  DCHECK(check == DISABLE_INLINED_SMI_CHECK);
1010  DCHECK(Assembler::IsAndImmediate(instr_at_patch));
1011  patcher.masm()->andi(at, reg, 0);
1012  }
1013  DCHECK(Assembler::IsBranch(branch_instr));
1014  if (Assembler::IsBeq(branch_instr)) {
1015  patcher.ChangeBranchCondition(ne);
1016  } else {
1017  DCHECK(Assembler::IsBne(branch_instr));
1018  patcher.ChangeBranchCondition(eq);
1019  }
1020 }
1021 }
1022 } // namespace v8::internal
1023 
1024 #endif // V8_TARGET_ARCH_MIPS