ic-mips64.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.


#include "src/v8.h"

#if V8_TARGET_ARCH_MIPS64

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// Static IC stub generators.
//

#define __ ACCESS_MASM(masm)


static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
                                            Label* global_object) {
  // Register usage:
  //   type: holds the receiver instance type on entry.
  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
  __ Branch(global_object, eq, type, Operand(JS_BUILTINS_OBJECT_TYPE));
  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
}
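
// Illustrative note (not part of the original file): all three global
// receiver kinds are routed to global_object because global objects keep
// their properties in PropertyCells; a plain dictionary probe would yield
// the cell rather than the value inside it, so callers send such receivers
// to their slow path instead.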


// Helper function used from LoadIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done. Can be the same as elements or name, clobbering
//           one of these in the case of not jumping to the miss label.
// The two scratch registers need to be different from elements, name and
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
// The address returned from GenerateStringDictionaryProbes() in scratch2
// is used.
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
                                   Register elements, Register name,
                                   Register result, Register scratch1,
                                   Register scratch2) {
  // Main use of the scratch registers.
  //   scratch1: Used as temporary and to hold the capacity of the property
  //             dictionary.
  //   scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry, check that the value is a normal
  // property.
  __ bind(&done);  // scratch2 == elements + 4 * index.
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ ld(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ And(at, scratch1,
         Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
  __ Branch(miss, ne, at, Operand(zero_reg));

  // Get the value at the masked, scaled index and return.
  __ ld(result,
        FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}
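
// Illustrative note (not part of the original file): a NameDictionary entry
// occupies three consecutive pointer-sized slots (key, value, details). On a
// hit, scratch2 holds elements plus the scaled entry index, so relative to
// kElementsStartOffset:
//
//   key     at kElementsStartOffset + 0 * kPointerSize
//   value   at kElementsStartOffset + 1 * kPointerSize
//   details at kElementsStartOffset + 2 * kPointerSize
//
// which is exactly how kDetailsOffset and the final result load above are
// derived.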


// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store.
// The two scratch registers need to be different from elements, name and
// value.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
// The address returned from GenerateStringDictionaryProbes() in scratch2
// is used.
static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
                                    Register elements, Register name,
                                    Register value, Register scratch1,
                                    Register scratch2) {
  // Main use of the scratch registers.
  //   scratch1: Used as temporary and to hold the capacity of the property
  //             dictionary.
  //   scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry in the dictionary, check that the value
  // is a normal property that is not read only.
  __ bind(&done);  // scratch2 == elements + 4 * index.
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  const int kTypeAndReadOnlyMask =
      (PropertyDetails::TypeField::kMask |
       PropertyDetails::AttributesField::encode(READ_ONLY));
  __ ld(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ And(at, scratch1, Operand(Smi::FromInt(kTypeAndReadOnlyMask)));
  __ Branch(miss, ne, at, Operand(zero_reg));

  // Store the value at the masked, scaled index and return.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ Daddu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
  __ sd(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ mov(scratch1, value);
  __ RecordWrite(elements, scratch2, scratch1, kRAHasNotBeenSaved,
                 kDontSaveFPRegs);
}
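
// Illustrative note (not part of the original file): the details word is a
// smi, so the mask is smi-tagged with Smi::FromInt before the AND. A nonzero
// result means the entry either is not a NORMAL data property (nonzero
// TypeField) or carries the READ_ONLY attribute; both conditions must defer
// to the runtime, hence the single combined branch to the miss label.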


// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver, Register map,
                                           Register scratch,
                                           int interceptor_bit, Label* slow) {
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);
  // Get the map of the receiver.
  __ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check bit field.
  __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
  __ And(at, scratch,
         Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
  __ Branch(slow, ne, at, Operand(zero_reg));
  // Check that the object is some kind of JS object EXCEPT JS Value type.
  // In the case that the object is a value-wrapper object,
  // we enter the runtime system to make sure that indexing into string
  // objects works as intended.
  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
  __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
}


// Loads an indexed element from a fast case array.
// If not_fast_array is NULL, doesn't perform the elements map check.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
                                  Register key, Register elements,
                                  Register scratch1, Register scratch2,
                                  Register result, Label* not_fast_array,
                                  Label* out_of_range) {
  // Register use:
  //
  // receiver - holds the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // elements - holds the elements of the receiver on exit.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'receiver' or 'key'.
  //            Unchanged on bailout so 'receiver' and 'key' can be safely
  //            used by further computation.
  //
  // Scratch registers:
  //
  // scratch1 - used to hold elements map and elements length.
  //            Holds the elements map if not_fast_array branch is taken.
  //
  // scratch2 - used to hold the loaded value.

  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  if (not_fast_array != NULL) {
    // Check that the object is in fast mode (not dictionary).
    __ ld(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
    __ Branch(not_fast_array, ne, scratch1, Operand(at));
  } else {
    __ AssertFastElements(elements);
  }

  // Check that the key (index) is within bounds.
  __ ld(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(out_of_range, hs, key, Operand(scratch1));

  // Fast case: Do the load.
  __ Daddu(scratch1, elements,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  // The key is a smi.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
  __ SmiScale(at, key, kPointerSizeLog2);
  __ daddu(at, at, scratch1);
  __ ld(scratch2, MemOperand(at));

  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  // In case the loaded value is the_hole we have to consult GetProperty
  // to ensure the prototype chain is searched.
  __ Branch(out_of_range, eq, scratch2, Operand(at));
  __ mov(result, scratch2);
}
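
// Worked example (illustrative, not part of the original file): MIPS64 smis
// keep their 32-bit payload in the upper word, so SmiScale(at, key,
// kPointerSizeLog2) shifts right by 32 - 3 == 29 and converts the tagged key
// straight into a byte offset. For key == smi(3):
//
//   raw bits       : 3 << 32
//   after SmiScale : (3 << 32) >> 29 == 24 == 3 * kPointerSize
//
// so the load address is scratch1 (the elements data start) + 24, i.e. the
// element at index 3.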


// Checks whether a key is an array index string or a unique name.
// Falls through if a key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
                                 Register map, Register hash,
                                 Label* index_string, Label* not_unique) {
  // The key is not a smi.
  Label unique;
  // Is it a name?
  __ GetObjectType(key, map, hash);
  __ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE));
  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
  __ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE));

  // Is the string an array index, with cached numeric value?
  __ lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset));
  __ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask));
  __ Branch(index_string, eq, at, Operand(zero_reg));

  // Is the string internalized? We know it's a string, so a single
  // bit test is enough.
  // map: key map
  __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0);
  __ And(at, hash, Operand(kIsNotInternalizedMask));
  __ Branch(not_unique, ne, at, Operand(zero_reg));

  __ bind(&unique);
}
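
// Illustrative note (not part of the original file): a Name's hash field
// packs flag bits next to the hash proper. When the bits in
// Name::kContainsCachedArrayIndexMask are all clear, the field caches the
// string's numeric value, so a key like "7" branches to index_string and is
// handled on the fast element path without any dictionary lookup.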


void LoadIC::GenerateNormal(MacroAssembler* masm) {
  Register dictionary = a0;
  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
  Label slow;

  __ ld(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
                                    JSObject::kPropertiesOffset));
  GenerateDictionaryLoad(masm, &slow, dictionary,
                         LoadDescriptor::NameRegister(), v0, a3, a4);
  __ Ret();

  // Dictionary load failed, go slow (but don't miss).
  __ bind(&slow);
  GenerateRuntimeGetProperty(masm);
}


// A register that isn't one of the parameters to the load ic.
static const Register LoadIC_TempRegister() { return a3; }


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in ra.
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, a4);

  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());

  // Perform tail call to the entry.
  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
  __ TailCallExternalReference(ref, 2, 1);
}


void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in ra.

  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());

  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}


static MemOperand GenerateMappedArgumentsLookup(
    MacroAssembler* masm, Register object, Register key, Register scratch1,
    Register scratch2, Register scratch3, Label* unmapped_case,
    Label* slow_case) {
  Heap* heap = masm->isolate()->heap();

  // Check that the receiver is a JSObject. Because of the map check
  // later, we do not need to check for interceptors or whether it
  // requires access checks.
  __ JumpIfSmi(object, slow_case);
  // Check that the object is some kind of JSObject.
  __ GetObjectType(object, scratch1, scratch2);
  __ Branch(slow_case, lt, scratch2, Operand(FIRST_JS_RECEIVER_TYPE));

  // Check that the key is a positive smi.
  __ NonNegativeSmiTst(key, scratch1);
  __ Branch(slow_case, ne, scratch1, Operand(zero_reg));

  // Load the elements into scratch1 and check its map.
  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
  __ ld(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
  __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
  // Check if element is in the range of mapped arguments. If not, jump
  // to the unmapped lookup with the parameter map in scratch1.
  __ ld(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
  __ Dsubu(scratch2, scratch2, Operand(Smi::FromInt(2)));
  __ Branch(unmapped_case, Ugreater_equal, key, Operand(scratch2));

  // Load element index and check whether it is the hole.
  const int kOffset =
      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;

  __ SmiUntag(scratch3, key);
  __ dsll(scratch3, scratch3, kPointerSizeLog2);
  __ Daddu(scratch3, scratch3, Operand(kOffset));

  __ Daddu(scratch2, scratch1, scratch3);
  __ ld(scratch2, MemOperand(scratch2));
  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
  __ Branch(unmapped_case, eq, scratch2, Operand(scratch3));

  // Load value from context and return it. We can reuse scratch1 because
  // we do not jump to the unmapped lookup (which requires the parameter
  // map in scratch1).
  __ ld(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
  __ SmiUntag(scratch3, scratch2);
  __ dsll(scratch3, scratch3, kPointerSizeLog2);
  __ Daddu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
  __ Daddu(scratch2, scratch1, scratch3);
  return MemOperand(scratch2);
}
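
// Layout sketch (illustrative, not part of the original file) of the
// parameter map consulted above:
//
//   elements (map == sloppy_arguments_elements_map)
//     [0]     the context
//     [1]     the arguments backing store (a FixedArray)
//     [2 + i] for parameter i: a smi context slot index, or the_hole if the
//             parameter is unmapped and lives in the backing store
//
// kOffset == FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag is
// therefore the untagged offset of slot [2], and length - 2 bounds the
// mapped range checked against the key.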


static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
                                                  Register key,
                                                  Register parameter_map,
                                                  Register scratch,
                                                  Label* slow_case) {
  // Element is in arguments backing store, which is referenced by the
  // second element of the parameter_map. The parameter_map register
  // must be loaded with the parameter map of the arguments object and is
  // overwritten.
  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
  Register backing_store = parameter_map;
  __ ld(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
  __ CheckMap(backing_store, scratch, Heap::kFixedArrayMapRootIndex, slow_case,
              DONT_DO_SMI_CHECK);
  __ ld(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
  __ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
  __ SmiUntag(scratch, key);
  __ dsll(scratch, scratch, kPointerSizeLog2);
  __ Daddu(scratch, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ Daddu(scratch, backing_store, scratch);
  return MemOperand(scratch);
}


void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register key = StoreDescriptor::NameRegister();
  Register value = StoreDescriptor::ValueRegister();
  DCHECK(value.is(a0));

  Label slow, notin;
  // Store address is returned in register (of MemOperand) mapped_location.
  MemOperand mapped_location = GenerateMappedArgumentsLookup(
      masm, receiver, key, a3, a4, a5, &notin, &slow);
  __ sd(value, mapped_location);
  __ mov(t1, value);
  DCHECK_EQ(mapped_location.offset(), 0);
  __ RecordWrite(a3, mapped_location.rm(), t1, kRAHasNotBeenSaved,
                 kDontSaveFPRegs);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, value);  // (In delay slot) return the value stored in v0.
  __ bind(&notin);
  // The unmapped lookup expects that the parameter map is in a3.
  // Store address is returned in register (of MemOperand) unmapped_location.
  MemOperand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, key, a3, a4, &slow);
  __ sd(value, unmapped_location);
  __ mov(t1, value);
  DCHECK_EQ(unmapped_location.offset(), 0);
  __ RecordWrite(a3, unmapped_location.rm(), t1, kRAHasNotBeenSaved,
                 kDontSaveFPRegs);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);  // (In delay slot) return the value stored in v0.
  __ bind(&slow);
  GenerateMiss(masm);
}


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in ra.
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, a4);

  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);

  __ TailCallExternalReference(ref, 2, 1);
}


void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in ra.

  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());

  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}


void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  // The return address is in ra.
  Label slow, check_name, index_smi, index_name, property_array_property;
  Label probe_dictionary, check_number_dictionary;

  Register key = LoadDescriptor::NameRegister();
  Register receiver = LoadDescriptor::ReceiverRegister();
  DCHECK(key.is(a2));
  DCHECK(receiver.is(a1));

  Isolate* isolate = masm->isolate();

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &check_name);
  __ bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.

  GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
                                 Map::kHasIndexedInterceptor, &slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(a0, a3, &check_number_dictionary);

  GenerateFastArrayLoad(masm, receiver, key, a0, a3, a4, v0, NULL, &slow);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, a4, a3);
  __ Ret();

  __ bind(&check_number_dictionary);
  __ ld(a4, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ ld(a3, FieldMemOperand(a4, HeapObject::kMapOffset));

  // Check whether the elements object is a number dictionary.
  // a3: elements map
  // a4: elements
  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
  __ Branch(&slow, ne, a3, Operand(at));
  __ dsra32(a0, key, 0);
  __ LoadFromNumberDictionary(&slow, a4, key, v0, a0, a3, a5);
  __ Ret();

  // Slow case, key and receiver still in a2 and a1.
  __ bind(&slow);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, a4,
                      a3);
  GenerateRuntimeGetProperty(masm);

  __ bind(&check_name);
  GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);

  GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
                                 Map::kHasNamedInterceptor, &slow);


  // If the receiver is a fast-case object, check the keyed lookup
  // cache. Otherwise probe the dictionary.
  __ ld(a3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ ld(a4, FieldMemOperand(a3, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
  __ Branch(&probe_dictionary, eq, a4, Operand(at));

  // Load the map of the receiver, compute the keyed lookup cache hash
  // based on 32 bits of the map pointer and the name hash.
  __ ld(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ dsll32(a3, a0, 0);
  __ dsrl32(a3, a3, 0);
  __ dsra(a3, a3, KeyedLookupCache::kMapHashShift);
  __ lwu(a4, FieldMemOperand(key, Name::kHashFieldOffset));
  __ dsra(at, a4, Name::kHashShift);
  __ xor_(a3, a3, at);
  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
  __ And(a3, a3, Operand(mask));

  // Load the key (consisting of map and unique name) from the cache and
  // check for match.
  Label load_in_object_property;
  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
  Label hit_on_nth_entry[kEntriesPerBucket];
  ExternalReference cache_keys =
      ExternalReference::keyed_lookup_cache_keys(isolate);
  __ li(a4, Operand(cache_keys));
  __ dsll(at, a3, kPointerSizeLog2 + 1);
  __ daddu(a4, a4, at);

  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
    Label try_next_entry;
    __ ld(a5, MemOperand(a4, kPointerSize * i * 2));
    __ Branch(&try_next_entry, ne, a0, Operand(a5));
    __ ld(a5, MemOperand(a4, kPointerSize * (i * 2 + 1)));
    __ Branch(&hit_on_nth_entry[i], eq, key, Operand(a5));
    __ bind(&try_next_entry);
  }

  __ ld(a5, MemOperand(a4, kPointerSize * (kEntriesPerBucket - 1) * 2));
  __ Branch(&slow, ne, a0, Operand(a5));
  __ ld(a5, MemOperand(a4, kPointerSize * ((kEntriesPerBucket - 1) * 2 + 1)));
  __ Branch(&slow, ne, key, Operand(a5));

  // Get field offset.
  // a0 : receiver's map
  // a3 : lookup cache index
  ExternalReference cache_field_offsets =
      ExternalReference::keyed_lookup_cache_field_offsets(isolate);

  // Hit on nth entry.
  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
    __ bind(&hit_on_nth_entry[i]);
    __ li(a4, Operand(cache_field_offsets));

    // TODO(yy) This data structure does NOT follow natural pointer size.
    __ dsll(at, a3, kPointerSizeLog2 - 1);
    __ daddu(at, a4, at);
    __ lwu(a5, MemOperand(at, kPointerSize / 2 * i));

    __ lbu(a6, FieldMemOperand(a0, Map::kInObjectPropertiesOffset));
    __ Dsubu(a5, a5, a6);
    __ Branch(&property_array_property, ge, a5, Operand(zero_reg));
    if (i != 0) {
      __ Branch(&load_in_object_property);
    }
  }

  // Load in-object property.
  __ bind(&load_in_object_property);
  __ lbu(a6, FieldMemOperand(a0, Map::kInstanceSizeOffset));
  // Index from start of object.
  __ daddu(a6, a6, a5);
  // Remove the heap tag.
  __ Dsubu(receiver, receiver, Operand(kHeapObjectTag));
  __ dsll(at, a6, kPointerSizeLog2);
  __ daddu(at, receiver, at);
  __ ld(v0, MemOperand(at));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
                      a4, a3);
  __ Ret();

  // Load property array property.
  __ bind(&property_array_property);
  __ ld(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ Daddu(receiver, receiver, FixedArray::kHeaderSize - kHeapObjectTag);
  __ dsll(v0, a5, kPointerSizeLog2);
  __ Daddu(v0, v0, a1);
  __ ld(v0, MemOperand(v0));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
                      a4, a3);
  __ Ret();


  // Do a quick inline probe of the receiver's dictionary, if it
  // exists.
  __ bind(&probe_dictionary);
  // a3: elements
  __ ld(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, a0, &slow);
  // Load the property to v0.
  GenerateDictionaryLoad(masm, &slow, a3, key, v0, a5, a4);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, a4,
                      a3);
  __ Ret();

  __ bind(&index_name);
  __ IndexFromHash(a3, key);
  // Now jump to the place where smi keys are handled.
  __ Branch(&index_smi);
}
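
// Hash sketch (illustrative, not part of the original file) of the lookup
// cache probe above, written as plain C++ over the same constants; map_word
// and hash_field stand for the values the stub loads from the receiver's map
// and the key's hash field:
//
//   uint32_t map_low32 = static_cast<uint32_t>(map_word);
//   uint32_t hash = (map_low32 >> KeyedLookupCache::kMapHashShift) ^
//                   (hash_field >> Name::kHashShift);
//   uint32_t entry = hash & (KeyedLookupCache::kCapacityMask &
//                            KeyedLookupCache::kHashMask);
//
// entry indexes the first slot of a bucket: cache_keys holds
// kEntriesPerBucket (map, name) pairs per bucket (hence the dsll by
// kPointerSizeLog2 + 1), while cache_field_offsets holds one 32-bit field
// offset per entry, which is why it is indexed with half-pointer loads.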


void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
  // Return address is in ra.
  Label miss;

  Register receiver = LoadDescriptor::ReceiverRegister();
  Register index = LoadDescriptor::NameRegister();
  Register scratch = a3;
  Register result = v0;
  DCHECK(!scratch.is(receiver) && !scratch.is(index));

  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          STRING_INDEX_IS_ARRAY_INDEX);
  char_at_generator.GenerateFast(masm);
  __ Ret();

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, call_helper);

  __ bind(&miss);
  GenerateMiss(masm);
}


static void KeyedStoreGenerateGenericHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
    Register value, Register key, Register receiver, Register receiver_map,
    Register elements_map, Register elements) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;

  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  Register scratch_value = a4;
  Register address = a5;
  if (check_map == kCheckMap) {
    __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ Branch(fast_double, ne, elements_map,
              Operand(masm->isolate()->factory()->fixed_array_map()));
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed1;
  __ Daddu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ SmiScale(at, key, kPointerSizeLog2);
  __ daddu(address, address, at);
  __ ld(scratch_value, MemOperand(address));

  __ Branch(&holecheck_passed1, ne, scratch_value,
            Operand(masm->isolate()->factory()->the_hole_value()));
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
                                      slow);

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);

  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Daddu(scratch_value, key, Operand(Smi::FromInt(1)));
    __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  // It's irrelevant whether array is smi-only or not when writing a smi.
  __ Daddu(address, elements,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiScale(scratch_value, key, kPointerSizeLog2);
  __ Daddu(address, address, scratch_value);
  __ sd(value, MemOperand(address));
  __ Ret();

  __ bind(&non_smi_value);
  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, scratch_value,
                             &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Daddu(scratch_value, key, Operand(Smi::FromInt(1)));
    __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Daddu(address, elements,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiScale(scratch_value, key, kPointerSizeLog2);
  __ Daddu(address, address, scratch_value);
  __ sd(value, MemOperand(address));
  // Update write barrier for the elements array address.
  __ mov(scratch_value, value);  // Preserve the value which is returned.
  __ RecordWrite(elements, address, scratch_value, kRAHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret();

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
    __ Branch(slow, ne, elements_map, Operand(at));
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so
  // go to the runtime.
  __ Daddu(address, elements,
           Operand(FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32) -
                   kHeapObjectTag));
  __ SmiScale(at, key, kPointerSizeLog2);
  __ daddu(address, address, at);
  __ lw(scratch_value, MemOperand(address));
  __ Branch(&fast_double_without_map_check, ne, scratch_value,
            Operand(kHoleNanUpper32));
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
                                      slow);

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key,
                                 elements,  // Overwritten.
                                 a3,        // Scratch regs...
                                 a4, a5, &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Daddu(scratch_value, key, Operand(Smi::FromInt(1)));
    __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();

  __ bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ ld(a4, FieldMemOperand(value, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  __ Branch(&non_double_value, ne, a4, Operand(at));

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(
      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, a4, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   receiver_map, mode, slow);
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, a4, slow);
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
  // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, a4, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);
}
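
// Illustrative summary (not part of the original file) of the elements-kind
// transitions the helper performs when a store does not fit the current
// backing store:
//
//   FAST_SMI_ELEMENTS    --store heap number-->  FAST_DOUBLE_ELEMENTS
//   FAST_SMI_ELEMENTS    --store other object->  FAST_ELEMENTS
//   FAST_DOUBLE_ELEMENTS --store non-number--->  FAST_ELEMENTS
//
// Each arrow is a one-way map transition; once transitioned, the store is
// completed on the corresponding fast path.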


void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
                                   StrictMode strict_mode) {
  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a1     : key
  //  -- a2     : receiver
  //  -- ra     : return address
  // -----------------------------------
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array;

  // Register usage.
  Register value = StoreDescriptor::ValueRegister();
  Register key = StoreDescriptor::NameRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  DCHECK(value.is(a0));
  Register receiver_map = a3;
  Register elements_map = a6;
  Register elements = a7;  // Elements array of the receiver.
  // a4 and a5 are used as general scratch registers.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &slow);
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map of the object.
  __ ld(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks and is not
  // observed. The generic stub does not perform map checks or handle observed
  // objects.
  __ lbu(a4, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ And(a4, a4,
         Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
  __ Branch(&slow, ne, a4, Operand(zero_reg));
  // Check if the object is a JS array or not.
  __ lbu(a4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
  __ Branch(&array, eq, a4, Operand(JS_ARRAY_TYPE));
  // Check that the object is some kind of JSObject.
  __ Branch(&slow, lt, a4, Operand(FIRST_JS_OBJECT_TYPE));

  // Object case: Check key against length in the elements array.
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&fast_object, lo, key, Operand(a4));

  // Slow case, handle jump to runtime.
  __ bind(&slow);
  // Entry registers are intact.
  // a0: value.
  // a1: key.
  // a2: receiver.
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // Condition code from comparing key and array length is still available.
  // Only support writing to array[array.length].
  __ Branch(&slow, ne, key, Operand(a4));
  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&slow, hs, key, Operand(a4));
  __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ Branch(&check_if_double_array, ne, elements_map,
            Heap::kFixedArrayMapRootIndex);

  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
  __ jmp(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is the length is always a smi.
  __ bind(&array);
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ ld(a4, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Branch(&extra, hs, key, Operand(a4));

  KeyedStoreGenerateGenericHelper(
      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
      value, key, receiver, receiver_map, elements_map, elements);
  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
                                  &slow, kDontCheckMap, kIncrementLength, value,
                                  key, receiver, receiver_map, elements_map,
                                  elements);
}
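
// Illustrative note (not part of the original file): the "extra" path is the
// one growth case the generic stub handles inline, a store to a[a.length] on
// a fast JSArray that still has spare capacity in its backing store. Any
// other out-of-bounds store, including one past the backing-store capacity,
// falls through to the runtime via the slow label.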


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  // Push receiver, key and value for runtime call.
  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
          StoreDescriptor::ValueRegister());

  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  DCHECK(receiver.is(a1));
  DCHECK(name.is(a2));
  DCHECK(StoreDescriptor::ValueRegister().is(a0));

  // Get the receiver from the stack and probe the stub cache.
  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::STORE_IC));
  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
                                               name, a3, a4, a5, a6);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void StoreIC::GenerateMiss(MacroAssembler* masm) {
  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
          StoreDescriptor::ValueRegister());

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateNormal(MacroAssembler* masm) {
  Label miss;
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  Register value = StoreDescriptor::ValueRegister();
  Register dictionary = a3;
  DCHECK(!AreAliased(value, receiver, name, dictionary, a4, a5));

  __ ld(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  GenerateDictionaryStore(masm, &miss, a3, name, value, a4, a5);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1, a4, a5);
  __ Ret();

  __ bind(&miss);
  __ IncrementCounter(counters->store_normal_miss(), 1, a4, a5);
  GenerateMiss(masm);
}


#undef __


Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address andi_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not an andi at, rx, #yyy,
  // nothing was inlined.
  Instr instr = Assembler::instr_at(andi_instruction_address);
  return Assembler::IsAndImmediate(instr) &&
         Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
}


void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
  Address andi_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not an andi at, rx, #yyy,
  // nothing was inlined.
  Instr instr = Assembler::instr_at(andi_instruction_address);
  if (!(Assembler::IsAndImmediate(instr) &&
        Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()))) {
    return;
  }

  // The delta to the start of the map check instruction and the
  // condition code used at the patched jump.
  int delta = Assembler::GetImmediate16(instr);
  delta += Assembler::GetRs(instr) * kImm16Mask;
  // If the delta is 0 the instruction is andi at, zero_reg, #0 which also
  // signals that nothing was inlined.
  if (delta == 0) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[  patching ic at %p, andi=%p, delta=%d\n", address,
           andi_instruction_address, delta);
  }

  Address patch_address =
      andi_instruction_address - delta * Instruction::kInstrSize;
  Instr instr_at_patch = Assembler::instr_at(patch_address);
  Instr branch_instr =
      Assembler::instr_at(patch_address + Instruction::kInstrSize);
  // This is patching a conditional "jump if not smi/jump if smi" site.
  // Enabling by changing from
  //   andi at, rx, 0
  //   Branch <target>, eq, at, Operand(zero_reg)
  // to:
  //   andi at, rx, #kSmiTagMask
  //   Branch <target>, ne, at, Operand(zero_reg)
  // and vice-versa to be disabled again.
  CodePatcher patcher(patch_address, 2);
  Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
  if (check == ENABLE_INLINED_SMI_CHECK) {
    DCHECK(Assembler::IsAndImmediate(instr_at_patch));
    DCHECK_EQ(0, Assembler::GetImmediate16(instr_at_patch));
    patcher.masm()->andi(at, reg, kSmiTagMask);
  } else {
    DCHECK(check == DISABLE_INLINED_SMI_CHECK);
    DCHECK(Assembler::IsAndImmediate(instr_at_patch));
    patcher.masm()->andi(at, reg, 0);
  }
  DCHECK(Assembler::IsBranch(branch_instr));
  if (Assembler::IsBeq(branch_instr)) {
    patcher.ChangeBranchCondition(ne);
  } else {
    DCHECK(Assembler::IsBne(branch_instr));
    patcher.ChangeBranchCondition(eq);
  }
}
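
// Worked example (illustrative, not part of the original file): the distance
// back to the patch site is recovered from the marker andi as
//
//   delta = GetImmediate16(instr) + GetRs(instr) * kImm16Mask
//
// i.e. the rs field extends the 16-bit immediate so sites further than
// 0xffff instructions away can still be encoded. For rs == 1 and
// imm16 == 0x10, delta == 0xffff + 0x10 == 0x1000f instructions.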
}
}  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS64