19 class SafepointGenerator
FINAL :
public CallWrapper {
22 LPointerMap* pointers,
23 Safepoint::DeoptMode
mode)
32 codegen_->RecordSafepoint(pointers_, deopt_mode_);
37 LPointerMap* pointers_;
38 Safepoint::DeoptMode deopt_mode_;
45 LPhase phase(
"Z_Code generation",
chunk());
62 code->set_safepoint_table_offset(
safepoints_.GetCodeOffset());
63 if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
69 DCHECK(info()->saves_caller_doubles());
71 Comment(
";;; Save clobbered callee double registers");
73 BitVector* doubles =
chunk()->allocated_double_registers();
74 BitVector::Iterator save_iterator(doubles);
75 while (!save_iterator.Done()) {
78 save_iterator.Advance();
85 DCHECK(info()->saves_caller_doubles());
87 Comment(
";;; Restore clobbered callee double registers");
88 BitVector* doubles =
chunk()->allocated_double_registers();
89 BitVector::Iterator save_iterator(doubles);
91 while (!save_iterator.Done()) {
94 save_iterator.Advance();
103 if (info()->IsOptimizing()) {
107 if (strlen(FLAG_stop_at) > 0 &&
108 info_->function()->name()->IsUtf8EqualTo(
CStrVector(FLAG_stop_at))) {
121 if (info_->this_has_uses() &&
122 info_->strict_mode() ==
SLOPPY &&
123 !info_->is_native()) {
125 int receiver_offset = info_->scope()->num_parameters() *
kPointerSize;
126 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
128 __ Branch(&ok,
ne, a2, Operand(at));
139 info()->set_prologue_offset(masm_->pc_offset());
141 if (info()->IsStub()) {
144 __ Prologue(info()->IsCodePreAgingActive());
147 info_->AddNoFrameRange(0, masm_->pc_offset());
153 if (FLAG_debug_code) {
162 __ Branch(&loop,
ne, a0, Operand(
sp));
169 if (info()->saves_caller_doubles()) {
175 if (heap_slots > 0) {
176 Comment(
";;; Allocate local context");
177 bool need_write_barrier =
true;
179 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
180 FastNewContextStub stub(isolate(), heap_slots);
183 need_write_barrier =
false;
195 for (
int i = 0;
i < num_parameters;
i++) {
197 if (var->IsContextSlot()) {
206 if (need_write_barrier) {
207 __ RecordWriteContextSlot(
209 }
else if (FLAG_debug_code) {
211 __ JumpIfInNewSpace(
cp, a0, &done);
212 __ Abort(kExpectedNewSpaceObject);
217 Comment(
";;; End allocate local context");
221 if (FLAG_trace && info()->IsOptimizing()) {
226 return !is_aborted();
246 if (instr->IsCall()) {
249 if (!instr->IsLazyBailout() && !instr->IsGap()) {
258 for (
int i = 0; !is_aborted() &&
i <
deferred_.length();
i++) {
262 instructions_->at(code->instruction_index())->hydrogen_value();
264 chunk()->
graph()->SourcePositionToScriptPosition(value->position()));
266 Comment(
";;; <@%d,#%d> "
267 "-------------------- Deferred %s --------------------",
268 code->instruction_index(),
269 code->instr()->hydrogen_value()->id(),
270 code->instr()->Mnemonic());
271 __ bind(code->entry());
273 Comment(
";;; Build frame");
282 Comment(
";;; Deferred code");
286 Comment(
";;; Destroy frame");
292 __ jmp(code->exit());
297 if (!is_aborted()) status_ =
DONE;
298 return !is_aborted();
304 Comment(
";;; -------------------- Jump table --------------------");
308 __ bind(&table_start);
311 Deoptimizer::JumpTableEntry* table_entry = &
jump_table_[
i];
312 __ bind(&table_entry->label);
313 Address entry = table_entry->address;
314 DeoptComment(table_entry->reason);
315 __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
316 if (table_entry->needs_frame) {
317 DCHECK(!info()->saves_caller_doubles());
318 if (needs_frame.is_bound()) {
319 __ Branch(&needs_frame);
321 __ bind(&needs_frame);
334 if (info()->saves_caller_doubles()) {
341 __ RecordComment(
"]");
345 if (!is_aborted()) status_ =
DONE;
346 return !is_aborted();
353 return !is_aborted();
374 if (op->IsRegister()) {
376 }
else if (op->IsConstantOperand()) {
377 LConstantOperand* const_op = LConstantOperand::cast(op);
378 HConstant* constant = chunk_->LookupConstant(const_op);
379 Handle<Object> literal = constant->handle(isolate());
380 Representation r = chunk_->LookupLiteralRepresentation(const_op);
381 if (r.IsInteger32()) {
382 DCHECK(literal->IsNumber());
383 __ li(scratch, Operand(
static_cast<int32_t>(literal->Number())));
384 }
else if (r.IsSmi()) {
385 DCHECK(constant->HasSmiValue());
386 __ li(scratch, Operand(
Smi::FromInt(constant->Integer32Value())));
387 }
else if (r.IsDouble()) {
388 Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
390 DCHECK(r.IsSmiOrTagged());
391 __ li(scratch, literal);
394 }
else if (op->IsStackSlot()) {
404 DCHECK(op->IsDoubleRegister());
412 if (op->IsDoubleRegister()) {
414 }
else if (op->IsConstantOperand()) {
415 LConstantOperand* const_op = LConstantOperand::cast(op);
416 HConstant* constant = chunk_->LookupConstant(const_op);
417 Handle<Object> literal = constant->handle(isolate());
418 Representation r = chunk_->LookupLiteralRepresentation(const_op);
419 if (r.IsInteger32()) {
420 DCHECK(literal->IsNumber());
421 __ li(at, Operand(
static_cast<int32_t>(literal->Number())));
422 __ mtc1(at, flt_scratch);
423 __ cvt_d_w(dbl_scratch, flt_scratch);
425 }
else if (r.IsDouble()) {
426 Abort(kUnsupportedDoubleImmediate);
427 }
else if (r.IsTagged()) {
428 Abort(kUnsupportedTaggedImmediate);
430 }
else if (op->IsStackSlot()) {
432 __ ldc1(dbl_scratch, mem_op);
441 HConstant* constant = chunk_->LookupConstant(op);
442 DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
443 return constant->handle(isolate());
448 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
453 return chunk_->LookupLiteralRepresentation(op).IsSmi();
459 HConstant* constant = chunk_->LookupConstant(op);
460 return constant->Integer32Value();
466 HConstant* constant = chunk_->LookupConstant(op);
467 int32_t value = constant->Integer32Value();
475 HConstant* constant = chunk_->LookupConstant(op);
481 HConstant* constant = chunk_->LookupConstant(op);
482 DCHECK(constant->HasDoubleValue());
483 return constant->DoubleValue();
488 if (op->IsConstantOperand()) {
489 LConstantOperand* const_op = LConstantOperand::cast(op);
490 HConstant* constant =
chunk()->LookupConstant(const_op);
491 Representation r = chunk_->LookupLiteralRepresentation(const_op);
493 DCHECK(constant->HasSmiValue());
494 return Operand(
Smi::FromInt(constant->Integer32Value()));
495 }
else if (r.IsInteger32()) {
496 DCHECK(constant->HasInteger32Value());
497 return Operand(constant->Integer32Value());
498 }
else if (r.IsDouble()) {
499 Abort(kToOperandUnsupportedDoubleImmediate);
502 return Operand(constant->handle(isolate()));
503 }
else if (op->IsRegister()) {
505 }
else if (op->IsDoubleRegister()) {
506 Abort(kToOperandIsDoubleRegisterUnimplemented);
507 return Operand((int64_t)0);
511 return Operand((int64_t)0);
522 DCHECK(!op->IsRegister());
523 DCHECK(!op->IsDoubleRegister());
524 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
536 DCHECK(op->IsDoubleStackSlot());
552 Translation* translation) {
553 if (environment ==
NULL)
return;
556 int translation_size = environment->translation_size();
558 int height = translation_size - environment->parameter_count();
561 bool has_closure_id = !info()->closure().is_null() &&
562 !info()->closure().is_identical_to(environment->closure());
563 int closure_id = has_closure_id
565 : Translation::kSelfLiteralId;
567 switch (environment->frame_type()) {
569 translation->BeginJSFrame(environment->ast_id(), closure_id, height);
572 translation->BeginConstructStubFrame(closure_id, translation_size);
575 DCHECK(translation_size == 1);
577 translation->BeginGetterStubFrame(closure_id);
580 DCHECK(translation_size == 2);
582 translation->BeginSetterStubFrame(closure_id);
585 translation->BeginCompiledStubFrame();
588 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
592 int object_index = 0;
593 int dematerialized_index = 0;
594 for (
int i = 0;
i < translation_size; ++
i) {
595 LOperand* value = environment->values()->at(
i);
599 environment->HasTaggedValueAt(
i),
600 environment->HasUint32ValueAt(
i),
602 &dematerialized_index);
608 Translation* translation,
612 int* object_index_pointer,
613 int* dematerialized_index_pointer) {
614 if (op == LEnvironment::materialization_marker()) {
615 int object_index = (*object_index_pointer)++;
616 if (environment->ObjectIsDuplicateAt(object_index)) {
617 int dupe_of = environment->ObjectDuplicateOfAt(object_index);
618 translation->DuplicateObject(dupe_of);
621 int object_length = environment->ObjectLengthAt(object_index);
622 if (environment->ObjectIsArgumentsAt(object_index)) {
623 translation->BeginArgumentsObject(object_length);
625 translation->BeginCapturedObject(object_length);
627 int dematerialized_index = *dematerialized_index_pointer;
628 int env_offset = environment->translation_size() + dematerialized_index;
629 *dematerialized_index_pointer += object_length;
630 for (
int i = 0;
i < object_length; ++
i) {
631 LOperand* value = environment->values()->at(env_offset +
i);
635 environment->HasTaggedValueAt(env_offset +
i),
636 environment->HasUint32ValueAt(env_offset +
i),
637 object_index_pointer,
638 dematerialized_index_pointer);
643 if (op->IsStackSlot()) {
645 translation->StoreStackSlot(op->index());
646 }
else if (is_uint32) {
647 translation->StoreUint32StackSlot(op->index());
649 translation->StoreInt32StackSlot(op->index());
651 }
else if (op->IsDoubleStackSlot()) {
652 translation->StoreDoubleStackSlot(op->index());
653 }
else if (op->IsRegister()) {
656 translation->StoreRegister(reg);
657 }
else if (is_uint32) {
658 translation->StoreUint32Register(reg);
660 translation->StoreInt32Register(reg);
662 }
else if (op->IsDoubleRegister()) {
664 translation->StoreDoubleRegister(reg);
665 }
else if (op->IsConstantOperand()) {
666 HConstant* constant =
chunk()->LookupConstant(LConstantOperand::cast(op));
668 translation->StoreLiteral(src_index);
677 LInstruction* instr) {
685 SafepointMode safepoint_mode) {
705 if (context->IsRegister()) {
707 }
else if (context->IsStackSlot()) {
709 }
else if (context->IsConstantOperand()) {
710 HConstant* constant =
711 chunk_->LookupConstant(LConstantOperand::cast(context));
724 __ CallRuntimeSaveDoubles(
id);
726 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
731 Safepoint::DeoptMode
mode) {
732 environment->set_has_been_used();
733 if (!environment->HasBeenRegistered()) {
748 int jsframe_count = 0;
755 Translation translation(&
translations_, frame_count, jsframe_count, zone());
758 int pc_offset = masm()->pc_offset();
759 environment->Register(deoptimization_index,
761 (
mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
769 const char* detail, Register src1,
770 const Operand& src2) {
773 DCHECK(environment->HasBeenRegistered());
774 int id = environment->deoptimization_index();
775 DCHECK(info()->IsOptimizing() || info()->IsStub());
779 Abort(kBailoutWasNotPrepared);
783 if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
785 ExternalReference count = ExternalReference::stress_deopt_count(isolate());
788 __ li(scratch, Operand(count));
790 __ Subu(a1, a1, Operand(1));
791 __ Branch(&no_deopt,
ne, a1, Operand(zero_reg));
792 __ li(a1, Operand(FLAG_deopt_every_n_times));
802 if (info()->ShouldTrapOnDeopt()) {
804 if (condition !=
al) {
807 __ stop(
"trap_on_deopt");
811 Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
812 instr->Mnemonic(), detail);
817 !info()->saves_caller_doubles()) {
818 DeoptComment(reason);
821 Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
835 const char* detail, Register src1,
836 const Operand& src2) {
840 DeoptimizeIf(condition, instr, bailout_type, detail, src1, src2);
846 if (length == 0)
return;
847 Handle<DeoptimizationInputData> data =
850 Handle<ByteArray> translations =
852 data->SetTranslationByteArray(*translations);
854 data->SetOptimizationId(
Smi::FromInt(info_->optimization_id()));
855 if (info_->IsOptimizing()) {
858 data->SetSharedFunctionInfo(*info_->shared_info());
872 data->SetOsrAstId(
Smi::FromInt(info_->osr_ast_id().ToInt()));
876 for (
int i = 0;
i < length;
i++) {
878 data->SetAstId(
i, env->ast_id());
879 data->SetTranslationIndex(
i,
Smi::FromInt(env->translation_index()));
880 data->SetArgumentsStackHeight(
i,
884 code->set_deoptimization_data(*data);
901 const ZoneList<Handle<JSFunction> >* inlined_closures =
902 chunk()->inlined_closures();
904 for (
int i = 0, length = inlined_closures->length();
915 LInstruction* instr, SafepointMode safepoint_mode) {
921 instr->pointer_map(), 0, Safepoint::kLazyDeopt);
927 LPointerMap* pointers,
928 Safepoint::Kind kind,
930 Safepoint::DeoptMode deopt_mode) {
933 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
934 Safepoint safepoint =
safepoints_.DefineSafepoint(masm(),
935 kind, arguments, deopt_mode);
936 for (
int i = 0;
i < operands->length();
i++) {
937 LOperand* pointer = operands->at(
i);
938 if (pointer->IsStackSlot()) {
939 safepoint.DefinePointerSlot(pointer->index(), zone());
940 }
else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
941 safepoint.DefinePointerRegister(
ToRegister(pointer), zone());
948 Safepoint::DeoptMode deopt_mode) {
954 LPointerMap empty_pointers(zone());
961 Safepoint::DeoptMode deopt_mode) {
963 pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
969 masm()->positions_recorder()->RecordPosition(position);
970 masm()->positions_recorder()->WriteRecordedPositions();
975 if (label->is_loop_header())
return " (loop header)";
976 if (label->is_osr_entry())
return " (OSR entry)";
981 void LCodeGen::DoLabel(LLabel* label) {
982 Comment(
";;; <@%d,#%d> -------------------- B%d%s --------------------",
983 current_instruction_,
984 label->hydrogen_value()->id(),
987 __ bind(label->label());
988 current_block_ = label->block_id();
1003 LParallelMove* move = gap->GetParallelMove(inner_pos);
1009 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
1014 void LCodeGen::DoParameter(LParameter* instr) {
1019 void LCodeGen::DoCallStub(LCallStub* instr) {
1022 switch (instr->hydrogen()->major_key()) {
1023 case CodeStub::RegExpExec: {
1024 RegExpExecStub stub(isolate());
1028 case CodeStub::SubString: {
1029 SubStringStub stub(isolate());
1033 case CodeStub::StringCompare: {
1034 StringCompareStub stub(isolate());
1044 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
1049 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
1050 Register dividend =
ToRegister(instr->dividend());
1051 int32_t divisor = instr->divisor();
1060 HMod* hmod = instr->hydrogen();
1061 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1062 Label dividend_is_not_negative, done;
1065 __ Branch(÷nd_is_not_negative,
ge, dividend, Operand(zero_reg));
1067 __ dsubu(dividend, zero_reg, dividend);
1068 __ And(dividend, dividend, Operand(mask));
1070 DeoptimizeIf(
eq, instr,
"minus zero", dividend, Operand(zero_reg));
1073 __ dsubu(dividend, zero_reg, dividend);
1076 __ bind(÷nd_is_not_negative);
1077 __ And(dividend, dividend, Operand(mask));
1082 void LCodeGen::DoModByConstI(LModByConstI* instr) {
1083 Register dividend =
ToRegister(instr->dividend());
1084 int32_t divisor = instr->divisor();
1085 Register result =
ToRegister(instr->result());
1086 DCHECK(!dividend.is(result));
1093 __ TruncatingDiv(result, dividend,
Abs(divisor));
1094 __ Dmul(result, result, Operand(
Abs(divisor)));
1095 __ Dsubu(result, dividend, Operand(result));
1098 HMod* hmod = instr->hydrogen();
1100 Label remainder_not_zero;
1101 __ Branch(&remainder_not_zero,
ne, result, Operand(zero_reg));
1102 DeoptimizeIf(
lt, instr,
"minus zero", dividend, Operand(zero_reg));
1103 __ bind(&remainder_not_zero);
1108 void LCodeGen::DoModI(LModI* instr) {
1109 HMod* hmod = instr->hydrogen();
1110 const Register left_reg =
ToRegister(instr->left());
1111 const Register right_reg =
ToRegister(instr->right());
1112 const Register result_reg =
ToRegister(instr->result());
1115 __ Dmod(result_reg, left_reg, right_reg);
1121 DeoptimizeIf(
eq, instr,
"division by zero", right_reg, Operand(zero_reg));
1127 Label no_overflow_possible;
1128 __ Branch(&no_overflow_possible,
ne, left_reg, Operand(
kMinInt));
1132 __ Branch(&no_overflow_possible,
ne, right_reg, Operand(-1));
1134 __ mov(result_reg, zero_reg);
1136 __ bind(&no_overflow_possible);
1140 __ Branch(&done,
ge, left_reg, Operand(zero_reg));
1143 DeoptimizeIf(
eq, instr,
"minus zero", result_reg, Operand(zero_reg));
1149 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1150 Register dividend =
ToRegister(instr->dividend());
1151 int32_t divisor = instr->divisor();
1152 Register result =
ToRegister(instr->result());
1154 DCHECK(!result.is(dividend));
1157 HDiv* hdiv = instr->hydrogen();
1159 DeoptimizeIf(
eq, instr,
"minus zero", dividend, Operand(zero_reg));
1167 divisor != 1 && divisor != -1) {
1168 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1169 __ And(at, dividend, Operand(mask));
1170 DeoptimizeIf(
ne, instr,
"lost precision", at, Operand(zero_reg));
1173 if (divisor == -1) {
1174 __ Dsubu(result, zero_reg, dividend);
1179 __ Move(result, dividend);
1180 }
else if (
shift == 1) {
1181 __ dsrl32(result, dividend, 31);
1182 __ Daddu(result, dividend, Operand(result));
1184 __ dsra32(result, dividend, 31);
1185 __ dsrl32(result, result, 32 -
shift);
1186 __ Daddu(result, dividend, Operand(result));
1189 if (divisor < 0)
__ Dsubu(result, zero_reg, result);
1193 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1194 Register dividend =
ToRegister(instr->dividend());
1195 int32_t divisor = instr->divisor();
1196 Register result =
ToRegister(instr->result());
1197 DCHECK(!dividend.is(result));
1205 HDiv* hdiv = instr->hydrogen();
1207 DeoptimizeIf(
eq, instr,
"minus zero", dividend, Operand(zero_reg));
1210 __ TruncatingDiv(result, dividend,
Abs(divisor));
1211 if (divisor < 0)
__ Subu(result, zero_reg, result);
1214 __ Dmul(
scratch0(), result, Operand(divisor));
1222 void LCodeGen::DoDivI(LDivI* instr) {
1223 HBinaryOperation* hdiv = instr->hydrogen();
1224 Register dividend =
ToRegister(instr->dividend());
1225 Register divisor =
ToRegister(instr->divisor());
1226 const Register result =
ToRegister(instr->result());
1230 __ Ddiv(result, dividend, divisor);
1234 DeoptimizeIf(
eq, instr,
"division by zero", divisor, Operand(zero_reg));
1239 Label left_not_zero;
1240 __ Branch(&left_not_zero,
ne, dividend, Operand(zero_reg));
1241 DeoptimizeIf(
lt, instr,
"minus zero", divisor, Operand(zero_reg));
1242 __ bind(&left_not_zero);
1248 Label left_not_min_int;
1249 __ Branch(&left_not_min_int,
ne, dividend, Operand(
kMinInt));
1251 __ bind(&left_not_min_int);
1256 Register remainder =
ToRegister(instr->temp());
1260 __ dmod(remainder, dividend, divisor);
1262 DeoptimizeIf(
ne, instr,
"lost precision", remainder, Operand(zero_reg));
1267 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1279 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1280 Register dividend =
ToRegister(instr->dividend());
1281 Register result =
ToRegister(instr->result());
1282 int32_t divisor = instr->divisor();
1283 Register scratch = result.is(dividend) ?
scratch0() : dividend;
1284 DCHECK(!result.is(dividend) || !scratch.is(dividend));
1288 __ Move(result, dividend);
1296 __ dsra(result, dividend,
shift);
1303 __ Move(scratch, dividend);
1305 __ Dsubu(result, zero_reg, dividend);
1307 DeoptimizeIf(
eq, instr,
"minus zero", result, Operand(zero_reg));
1310 __ Xor(scratch, scratch, result);
1312 if (divisor == -1) {
1321 __ dsra(result, result,
shift);
1330 __ dsra(result, result,
shift);
1335 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1336 Register dividend =
ToRegister(instr->dividend());
1337 int32_t divisor = instr->divisor();
1338 Register result =
ToRegister(instr->result());
1339 DCHECK(!dividend.is(result));
1347 HMathFloorOfDiv* hdiv = instr->hydrogen();
1349 DeoptimizeIf(
eq, instr,
"minus zero", dividend, Operand(zero_reg));
1356 __ TruncatingDiv(result, dividend,
Abs(divisor));
1357 if (divisor < 0)
__ Dsubu(result, zero_reg, result);
1364 DCHECK(!temp.is(dividend) && !temp.is(result));
1365 Label needs_adjustment, done;
1366 __ Branch(&needs_adjustment, divisor > 0 ?
lt :
gt,
1367 dividend, Operand(zero_reg));
1368 __ TruncatingDiv(result, dividend,
Abs(divisor));
1369 if (divisor < 0)
__ Dsubu(result, zero_reg, result);
1371 __ bind(&needs_adjustment);
1372 __ Daddu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
1373 __ TruncatingDiv(result, temp,
Abs(divisor));
1374 if (divisor < 0)
__ Dsubu(result, zero_reg, result);
1375 __ Dsubu(result, result, Operand(1));
1381 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
1382 HBinaryOperation* hdiv = instr->hydrogen();
1383 Register dividend =
ToRegister(instr->dividend());
1384 Register divisor =
ToRegister(instr->divisor());
1385 const Register result =
ToRegister(instr->result());
1389 __ Ddiv(result, dividend, divisor);
1393 DeoptimizeIf(
eq, instr,
"division by zero", divisor, Operand(zero_reg));
1398 Label left_not_zero;
1399 __ Branch(&left_not_zero,
ne, dividend, Operand(zero_reg));
1400 DeoptimizeIf(
lt, instr,
"minus zero", divisor, Operand(zero_reg));
1401 __ bind(&left_not_zero);
1407 Label left_not_min_int;
1408 __ Branch(&left_not_min_int,
ne, dividend, Operand(
kMinInt));
1410 __ bind(&left_not_min_int);
1419 __ dmod(remainder, dividend, divisor);
1422 __ Xor(remainder, remainder, Operand(divisor));
1423 __ Branch(&done,
ge, remainder, Operand(zero_reg));
1424 __ Dsubu(result, result, Operand(1));
1429 void LCodeGen::DoMulI(LMulI* instr) {
1431 Register result =
ToRegister(instr->result());
1434 LOperand* right_op = instr->right();
1436 bool bailout_on_minus_zero =
1440 if (right_op->IsConstantOperand()) {
1443 if (bailout_on_minus_zero && (constant < 0)) {
1452 __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
1455 __ Dsubu(result, zero_reg, left);
1459 if (bailout_on_minus_zero) {
1464 __ mov(result, zero_reg);
1468 __ Move(result, left);
1474 int32_t mask = constant >> 31;
1475 uint32_t constant_abs = (constant + mask) ^ mask;
1481 if (constant < 0)
__ Dsubu(result, zero_reg, result);
1484 __ dsll(scratch, left,
shift);
1485 __ Daddu(result, scratch, left);
1487 if (constant < 0)
__ Dsubu(result, zero_reg, result);
1490 __ dsll(scratch, left,
shift);
1491 __ Dsubu(result, scratch, left);
1493 if (constant < 0)
__ Dsubu(result, zero_reg, result);
1496 __ li(at, constant);
1497 __ Dmul(result, left, at);
1502 DCHECK(right_op->IsRegister());
1507 if (instr->hydrogen()->representation().IsSmi()) {
1508 __ Dmulh(result, left, right);
1510 __ Dmul(result, left, right);
1512 __ dsra32(scratch, result, 0);
1513 __ sra(at, result, 31);
1514 if (instr->hydrogen()->representation().IsSmi()) {
1519 if (instr->hydrogen()->representation().IsSmi()) {
1520 __ SmiUntag(result, left);
1521 __ Dmul(result, result, right);
1523 __ Dmul(result, left, right);
1527 if (bailout_on_minus_zero) {
1529 __ Xor(at, left, right);
1530 __ Branch(&done,
ge, at, Operand(zero_reg));
1532 DeoptimizeIf(
eq, instr,
"minus zero", result, Operand(zero_reg));
1539 void LCodeGen::DoBitI(LBitI* instr) {
1540 LOperand* left_op = instr->left();
1541 LOperand* right_op = instr->right();
1542 DCHECK(left_op->IsRegister());
1544 Register result =
ToRegister(instr->result());
1547 if (right_op->IsStackSlot()) {
1550 DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
1554 switch (instr->op()) {
1555 case Token::BIT_AND:
1556 __ And(result, left, right);
1559 __ Or(result, left, right);
1561 case Token::BIT_XOR:
1562 if (right_op->IsConstantOperand() && right.immediate() ==
int32_t(~0)) {
1563 __ Nor(result, zero_reg, left);
1565 __ Xor(result, left, right);
1575 void LCodeGen::DoShiftI(LShiftI* instr) {
1578 LOperand* right_op = instr->right();
1580 Register result =
ToRegister(instr->result());
1582 if (right_op->IsRegister()) {
1585 switch (instr->op()) {
1594 if (instr->can_deopt()) {
1596 DeoptimizeIf(
lt, instr,
"negative value", result, Operand(zero_reg));
1609 int value =
ToInteger32(LConstantOperand::cast(right_op));
1610 uint8_t shift_count =
static_cast<uint8_t
>(value & 0x1F);
1611 switch (instr->op()) {
1613 if (shift_count != 0) {
1614 __ Ror(result, left, Operand(shift_count));
1616 __ Move(result, left);
1620 if (shift_count != 0) {
1621 __ sra(result, left, shift_count);
1623 __ Move(result, left);
1627 if (shift_count != 0) {
1628 __ srl(result, left, shift_count);
1630 if (instr->can_deopt()) {
1631 __ And(at, left, Operand(0x80000000));
1632 DeoptimizeIf(
ne, instr,
"negative value", at, Operand(zero_reg));
1634 __ Move(result, left);
1638 if (shift_count != 0) {
1639 if (instr->hydrogen_value()->representation().IsSmi()) {
1640 __ dsll(result, left, shift_count);
1642 __ sll(result, left, shift_count);
1645 __ Move(result, left);
1656 void LCodeGen::DoSubI(LSubI* instr) {
1657 LOperand* left = instr->left();
1658 LOperand* right = instr->right();
1659 LOperand* result = instr->result();
1662 if (!can_overflow) {
1663 if (right->IsStackSlot()) {
1667 DCHECK(right->IsRegister() || right->IsConstantOperand());
1673 if (right->IsStackSlot() || right->IsConstantOperand()) {
1680 DCHECK(right->IsRegister());
1689 if (!instr->hydrogen()->representation().IsSmi()) {
1697 void LCodeGen::DoConstantI(LConstantI* instr) {
1698 __ li(
ToRegister(instr->result()), Operand(instr->value()));
1702 void LCodeGen::DoConstantS(LConstantS* instr) {
1703 __ li(
ToRegister(instr->result()), Operand(instr->value()));
1707 void LCodeGen::DoConstantD(LConstantD* instr) {
1708 DCHECK(instr->result()->IsDoubleRegister());
1710 double v = instr->value();
1715 void LCodeGen::DoConstantE(LConstantE* instr) {
1716 __ li(
ToRegister(instr->result()), Operand(instr->value()));
1720 void LCodeGen::DoConstantT(LConstantT* instr) {
1721 Handle<Object>
object = instr->value(isolate());
1727 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1728 Register result =
ToRegister(instr->result());
1730 __ EnumLength(result,
map);
1734 void LCodeGen::DoDateField(LDateField* instr) {
1736 Register result =
ToRegister(instr->result());
1737 Register scratch =
ToRegister(instr->temp());
1738 Smi* index = instr->index();
1739 Label runtime, done;
1743 DCHECK(!scratch.is(
object));
1745 __ SmiTst(
object, at);
1747 __ GetObjectType(
object, scratch, scratch);
1750 if (index->value() == 0) {
1754 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1755 __ li(scratch, Operand(stamp));
1764 __ PrepareCallCFunction(2, scratch);
1765 __ li(a1, Operand(index));
1766 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1775 if (index->IsConstantOperand()) {
1776 int offset =
ToInteger32(LConstantOperand::cast(index));
1784 DCHECK(!scratch.is(
string));
1791 __ Daddu(scratch,
string, scratch);
1797 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1799 Register
string =
ToRegister(instr->string());
1800 Register result =
ToRegister(instr->result());
1802 if (FLAG_debug_code) {
1807 __ And(scratch, scratch,
1812 ? one_byte_seq_type : two_byte_seq_type));
1813 __ Check(
eq, kUnexpectedStringType, at, Operand(zero_reg));
1818 __ lbu(result, operand);
1820 __ lhu(result, operand);
1825 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1827 Register
string =
ToRegister(instr->string());
1830 if (FLAG_debug_code) {
1837 ? one_byte_seq_type : two_byte_seq_type;
1838 __ EmitSeqStringSetCharCheck(
string, index, value, scratch, encoding_mask);
1843 __ sb(value, operand);
1845 __ sh(value, operand);
1850 void LCodeGen::DoAddI(LAddI* instr) {
1851 LOperand* left = instr->left();
1852 LOperand* right = instr->right();
1853 LOperand* result = instr->result();
1856 if (!can_overflow) {
1857 if (right->IsStackSlot()) {
1861 DCHECK(right->IsRegister() || right->IsConstantOperand());
1867 if (right->IsStackSlot() ||
1868 right->IsConstantOperand()) {
1875 DCHECK(right->IsRegister());
1885 if (!instr->hydrogen()->representation().IsSmi()) {
1893 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1894 LOperand* left = instr->left();
1895 LOperand* right = instr->right();
1896 HMathMinMax::Operation operation = instr->hydrogen()->operation();
1897 Condition condition = (operation == HMathMinMax::kMathMin) ?
le :
ge;
1898 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1901 Register result_reg =
ToRegister(instr->result());
1902 Label return_right, done;
1904 __ Slt(scratch, left_reg, Operand(right_reg));
1905 if (condition ==
ge) {
1906 __ Movz(result_reg, left_reg, scratch);
1907 __ Movn(result_reg, right_reg, scratch);
1910 __ Movn(result_reg, left_reg, scratch);
1911 __ Movz(result_reg, right_reg, scratch);
1914 DCHECK(instr->hydrogen()->representation().IsDouble());
1918 Label check_nan_left, check_zero, return_left, return_right, done;
1919 __ BranchF(&check_zero, &check_nan_left,
eq, left_reg, right_reg);
1920 __ BranchF(&return_left,
NULL, condition, left_reg, right_reg);
1921 __ Branch(&return_right);
1923 __ bind(&check_zero);
1927 if (operation == HMathMinMax::kMathMin) {
1928 __ neg_d(left_reg, left_reg);
1929 __ sub_d(result_reg, left_reg, right_reg);
1930 __ neg_d(result_reg, result_reg);
1932 __ add_d(result_reg, left_reg, right_reg);
1936 __ bind(&check_nan_left);
1938 __ BranchF(
NULL, &return_left,
eq, left_reg, left_reg);
1939 __ bind(&return_right);
1940 if (!right_reg.is(result_reg)) {
1941 __ mov_d(result_reg, right_reg);
1945 __ bind(&return_left);
1946 if (!left_reg.is(result_reg)) {
1947 __ mov_d(result_reg, left_reg);
1954 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1958 switch (instr->op()) {
1960 __ add_d(result, left, right);
1963 __ sub_d(result, left, right);
1966 __ mul_d(result, left, right);
1969 __ div_d(result, left, right);
1973 RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
1974 __ MultiPush(saved_regs);
1977 __ MovToFloatParameters(left, right);
1979 ExternalReference::mod_two_doubles_operation(isolate()),
1982 __ MovFromFloatResult(result);
1985 __ MultiPop(saved_regs);
1995 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2002 CodeFactory::BinaryOpIC(isolate(), instr->op(),
NO_OVERWRITE).code();
2010 template<
class InstrType>
2014 const Operand& src2) {
2015 int left_block = instr->TrueDestination(chunk_);
2016 int right_block = instr->FalseDestination(chunk_);
2018 int next_block = GetNextEmittedBlock();
2019 if (right_block == left_block || condition ==
al) {
2021 }
else if (left_block == next_block) {
2022 __ Branch(chunk_->GetAssemblyLabel(right_block),
2024 }
else if (right_block == next_block) {
2025 __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
2027 __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
2028 __ Branch(chunk_->GetAssemblyLabel(right_block));
2033 template<
class InstrType>
2038 int right_block = instr->FalseDestination(chunk_);
2039 int left_block = instr->TrueDestination(chunk_);
2041 int next_block = GetNextEmittedBlock();
2042 if (right_block == left_block) {
2044 }
else if (left_block == next_block) {
2045 __ BranchF(chunk_->GetAssemblyLabel(right_block),
NULL,
2047 }
else if (right_block == next_block) {
2048 __ BranchF(chunk_->GetAssemblyLabel(left_block),
NULL,
2049 condition, src1, src2);
2051 __ BranchF(chunk_->GetAssemblyLabel(left_block),
NULL,
2052 condition, src1, src2);
2053 __ Branch(chunk_->GetAssemblyLabel(right_block));
2058 template<
class InstrType>
2062 const Operand& src2) {
2063 int false_block = instr->FalseDestination(chunk_);
2064 __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
2068 template<
class InstrType>
2073 int false_block = instr->FalseDestination(chunk_);
2074 __ BranchF(chunk_->GetAssemblyLabel(false_block),
NULL,
2075 condition, src1, src2);
2079 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2080 __ stop(
"LDebugBreak");
2084 void LCodeGen::DoBranch(LBranch* instr) {
2085 Representation r = instr->hydrogen()->value()->representation();
2086 if (r.IsInteger32() || r.IsSmi()) {
2087 DCHECK(!info()->IsStub());
2090 }
else if (r.IsDouble()) {
2091 DCHECK(!info()->IsStub());
2098 HType type = instr->hydrogen()->value()->type();
2099 if (type.IsBoolean()) {
2100 DCHECK(!info()->IsStub());
2101 __ LoadRoot(at, Heap::kTrueValueRootIndex);
2103 }
else if (type.IsSmi()) {
2104 DCHECK(!info()->IsStub());
2106 }
else if (type.IsJSArray()) {
2107 DCHECK(!info()->IsStub());
2109 }
else if (type.IsHeapNumber()) {
2110 DCHECK(!info()->IsStub());
2115 }
else if (type.IsString()) {
2116 DCHECK(!info()->IsStub());
2120 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2126 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
2127 __ Branch(instr->FalseLabel(chunk_),
eq, reg, Operand(at));
2131 __ LoadRoot(at, Heap::kTrueValueRootIndex);
2132 __ Branch(instr->TrueLabel(chunk_),
eq, reg, Operand(at));
2133 __ LoadRoot(at, Heap::kFalseValueRootIndex);
2134 __ Branch(instr->FalseLabel(chunk_),
eq, reg, Operand(at));
2138 __ LoadRoot(at, Heap::kNullValueRootIndex);
2139 __ Branch(instr->FalseLabel(chunk_),
eq, reg, Operand(at));
2144 __ Branch(instr->FalseLabel(chunk_),
eq, reg, Operand(zero_reg));
2145 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2146 }
else if (expected.NeedsMap()) {
2153 if (expected.NeedsMap()) {
2155 if (expected.CanBeUndetectable()) {
2159 __ Branch(instr->FalseLabel(chunk_),
ne, at, Operand(zero_reg));
2166 __ Branch(instr->TrueLabel(chunk_),
2176 __ Branch(instr->TrueLabel(chunk_),
ne, at, Operand(zero_reg));
2177 __ Branch(instr->FalseLabel(chunk_));
2178 __ bind(¬_string);
2183 const Register scratch =
scratch1();
2185 __ Branch(instr->TrueLabel(chunk_),
eq, scratch, Operand(
SYMBOL_TYPE));
2191 Label not_heap_number;
2192 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
2193 __ Branch(¬_heap_number,
ne,
map, Operand(at));
2195 __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2198 __ Branch(instr->FalseLabel(chunk_));
2199 __ bind(¬_heap_number);
2202 if (!expected.IsGeneric()) {
2220 void LCodeGen::DoGoto(LGoto* instr) {
2229 case Token::EQ_STRICT:
2233 case Token::NE_STRICT:
2237 cond = is_unsigned ?
lo :
lt;
2240 cond = is_unsigned ?
hi :
gt;
2243 cond = is_unsigned ?
ls :
le;
2246 cond = is_unsigned ?
hs :
ge;
2249 case Token::INSTANCEOF:
2257 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2258 LOperand* left = instr->left();
2259 LOperand* right = instr->right();
2265 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2267 double left_val =
ToDouble(LConstantOperand::cast(left));
2268 double right_val =
ToDouble(LConstantOperand::cast(right));
2269 int next_block =
EvalComparison(instr->op(), left_val, right_val) ?
2270 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2273 if (instr->is_double()) {
2281 __ BranchF(
NULL, instr->FalseLabel(chunk_),
eq,
2282 left_reg, right_reg);
2287 Operand cmp_right = Operand((int64_t)0);
2288 if (right->IsConstantOperand()) {
2290 if (instr->hydrogen_value()->representation().IsSmi()) {
2295 cmp_right = Operand(value);
2297 }
else if (left->IsConstantOperand()) {
2299 if (instr->hydrogen_value()->representation().IsSmi()) {
2304 cmp_right = Operand(value);
2313 EmitBranch(instr, cond, cmp_left, cmp_right);
2319 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2327 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2328 if (instr->hydrogen()->representation().IsTagged()) {
2329 Register input_reg =
ToRegister(instr->object());
2330 __ li(at, Operand(factory()->the_hole_value()));
2339 __ FmoveHigh(scratch, input_reg);
2344 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2345 Representation rep = instr->hydrogen()->value()->representation();
2346 DCHECK(!rep.IsInteger32());
2347 Register scratch =
ToRegister(instr->temp());
2349 if (rep.IsDouble()) {
2352 __ FmoveHigh(scratch, value);
2354 __ dsll32(scratch, scratch, 0);
2355 __ dsrl32(scratch, scratch, 0);
2356 __ li(at, 0x80000000);
2361 Heap::kHeapNumberMapRootIndex,
2362 instr->FalseLabel(
chunk()),
2367 __ mov(at, zero_reg);
2376 Label* is_not_object,
2378 __ JumpIfSmi(input, is_not_object);
2380 __ LoadRoot(temp2, Heap::kNullValueRootIndex);
2381 __ Branch(is_object,
eq, input, Operand(temp2));
2388 __ Branch(is_not_object,
ne, temp2, Operand(zero_reg));
2392 __ Branch(is_not_object,
2399 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2406 instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2415 Label* is_not_string,
2418 __ JumpIfSmi(input, is_not_string);
2420 __ GetObjectType(input, temp1, temp1);
2426 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2431 instr->hydrogen()->value()->type().IsHeapObject()
2434 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2441 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2448 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2452 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2453 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2464 case Token::EQ_STRICT:
2482 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2486 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2491 EmitBranch(instr, condition, v0, Operand(zero_reg));
2507 if (from ==
to)
return eq;
2515 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2519 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2520 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2523 __ GetObjectType(input, scratch, scratch);
2527 Operand(
TestType(instr->hydrogen())));
2531 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2533 Register result =
ToRegister(instr->result());
2535 __ AssertString(input);
2538 __ IndexFromHash(result, result);
2542 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2543 LHasCachedArrayIndexAndBranch* instr) {
2558 Handle<String>class_name,
2563 DCHECK(!input.is(temp2));
2566 __ JumpIfSmi(input, is_false);
2568 if (
String::Equals(isolate()->factory()->Function_string(), class_name)) {
2578 __ GetObjectType(input, temp, temp2);
2585 __ GetObjectType(input, temp, temp2);
2596 __ GetObjectType(temp, temp2, temp2);
2597 if (
String::Equals(class_name, isolate()->factory()->Object_string())) {
2620 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2624 Handle<String> class_name = instr->hydrogen()->class_name();
2627 class_name, input, temp, temp2);
2633 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2642 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2644 Label true_label, done;
2647 Register result =
ToRegister(instr->result());
2653 __ Branch(&true_label,
eq, result, Operand(zero_reg));
2654 __ li(result, Operand(factory()->false_value()));
2656 __ bind(&true_label);
2657 __ li(result, Operand(factory()->true_value()));
2662 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2665 DeferredInstanceOfKnownGlobal(
LCodeGen* codegen,
2666 LInstanceOfKnownGlobal* instr)
2669 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2671 virtual LInstruction* instr()
OVERRIDE {
return instr_; }
2672 Label* map_check() {
return &map_check_; }
2675 LInstanceOfKnownGlobal* instr_;
2679 DeferredInstanceOfKnownGlobal* deferred;
2680 deferred =
new(zone()) DeferredInstanceOfKnownGlobal(
this, instr);
2682 Label done, false_result;
2683 Register
object =
ToRegister(instr->value());
2685 Register result =
ToRegister(instr->result());
2691 __ JumpIfSmi(
object, &false_result);
2697 Register
map = temp;
2701 __ bind(deferred->map_check());
2705 Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
2706 __ li(at, Operand(Handle<Object>(cell)));
2708 __ BranchShort(&cache_miss,
ne,
map, Operand(at));
2712 __ li(result, Operand(factory()->the_hole_value()));
2717 __ bind(&cache_miss);
2719 __ LoadRoot(temp, Heap::kNullValueRootIndex);
2720 __ Branch(&false_result,
eq,
object, Operand(temp));
2724 __ Branch(&false_result,
cc, temp, Operand(zero_reg));
2727 __ Branch(deferred->entry());
2729 __ bind(&false_result);
2730 __ LoadRoot(result, Heap::kFalseValueRootIndex);
2734 __ bind(deferred->exit());
2741 Register result =
ToRegister(instr->result());
2751 InstanceofStub stub(isolate(),
flags);
2753 PushSafepointRegistersScope
scope(
this);
2762 static const int kAdditionalDelta = 13;
2763 int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
2764 Label before_push_delta;
2765 __ bind(&before_push_delta);
2769 __ StoreToSafepointRegisterSlot(temp, temp);
2775 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2776 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2779 __ StoreToSafepointRegisterSlot(result, result);
2783 void LCodeGen::DoCmpT(LCmpT* instr) {
2787 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2798 __ LoadRoot(
ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2799 DCHECK_EQ(1, masm()->InstructionsGeneratedSince(&check));
2800 __ LoadRoot(
ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2805 void LCodeGen::DoReturn(LReturn* instr) {
2806 if (FLAG_trace && info()->IsOptimizing()) {
2815 if (info()->saves_caller_doubles()) {
2818 int no_frame_start = -1;
2821 no_frame_start = masm_->pc_offset();
2824 if (instr->has_constant_parameter_count()) {
2825 int parameter_count =
ToInteger32(instr->constant_parameter_count());
2827 if (sp_delta != 0) {
2828 __ Daddu(
sp,
sp, Operand(sp_delta));
2831 Register reg =
ToRegister(instr->parameter_count());
2840 if (no_frame_start != -1) {
2841 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2846 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2847 Register result =
ToRegister(instr->result());
2848 __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
2850 if (instr->hydrogen()->RequiresHoleCheck()) {
2851 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2860 Register vector =
ToRegister(instr->temp_vector());
2862 __ li(vector, instr->hydrogen()->feedback_vector());
2870 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2877 if (FLAG_vector_ics) {
2878 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
2881 Handle<Code> ic = CodeFactory::LoadIC(isolate(),
mode).code();
2886 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2891 __ li(cell, Operand(instr->hydrogen()->cell().handle()));
2897 if (instr->hydrogen()->RequiresHoleCheck()) {
2899 Register payload =
ToRegister(instr->temp());
2901 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2911 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2912 Register context =
ToRegister(instr->context());
2913 Register result =
ToRegister(instr->result());
2916 if (instr->hydrogen()->RequiresHoleCheck()) {
2917 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2919 if (instr->hydrogen()->DeoptimizesOnHole()) {
2923 __ Branch(&is_not_hole,
ne, result, Operand(at));
2924 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2925 __ bind(&is_not_hole);
2931 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2932 Register context =
ToRegister(instr->context());
2937 Label skip_assignment;
2939 if (instr->hydrogen()->RequiresHoleCheck()) {
2940 __ ld(scratch, target);
2941 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2943 if (instr->hydrogen()->DeoptimizesOnHole()) {
2946 __ Branch(&skip_assignment,
ne, scratch, Operand(at));
2950 __ sd(value, target);
2951 if (instr->hydrogen()->NeedsWriteBarrier()) {
2953 instr->hydrogen()->value()->type().IsHeapObject()
2955 __ RecordWriteContextSlot(context,
2965 __ bind(&skip_assignment);
2969 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2970 HObjectAccess access = instr->hydrogen()->access();
2971 int offset = access.offset();
2972 Register
object =
ToRegister(instr->object());
2973 if (access.IsExternalMemory()) {
2974 Register result =
ToRegister(instr->result());
2976 __ Load(result, operand, access.representation());
2980 if (instr->hydrogen()->representation().IsDouble()) {
2986 Register result =
ToRegister(instr->result());
2987 if (!access.IsInobject()) {
2992 Representation representation = access.representation();
2994 instr->hydrogen()->representation().IsInteger32()) {
2995 if (FLAG_debug_code) {
2999 __ AssertSmi(scratch);
3012 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3019 if (FLAG_vector_ics) {
3020 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
3022 Handle<Code> ic = CodeFactory::LoadIC(isolate(),
NOT_CONTEXTUAL).code();
3027 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3029 Register
function =
ToRegister(instr->function());
3030 Register result =
ToRegister(instr->result());
3037 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
3042 __ GetObjectType(result, scratch, scratch);
3053 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3054 Register result =
ToRegister(instr->result());
3055 __ LoadRoot(result, instr->index());
3059 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3060 Register arguments =
ToRegister(instr->arguments());
3061 Register result =
ToRegister(instr->result());
3064 if (instr->length()->IsConstantOperand()) {
3065 int const_length =
ToInteger32(LConstantOperand::cast(instr->length()));
3066 if (instr->index()->IsConstantOperand()) {
3067 int const_index =
ToInteger32(LConstantOperand::cast(instr->index()));
3068 int index = (const_length - const_index) + 1;
3072 __ li(at, Operand(const_length + 1));
3073 __ Dsubu(result, at, index);
3075 __ Daddu(at, arguments, at);
3078 }
else if (instr->index()->IsConstantOperand()) {
3079 Register length =
ToRegister(instr->length());
3080 int const_index =
ToInteger32(LConstantOperand::cast(instr->index()));
3081 int loc = const_index - 1;
3083 __ Dsubu(result, length, Operand(loc));
3085 __ Daddu(at, arguments, at);
3089 __ Daddu(at, arguments, at);
3093 Register length =
ToRegister(instr->length());
3095 __ Dsubu(result, length, index);
3096 __ Daddu(result, result, 1);
3098 __ Daddu(at, arguments, at);
3105 Register external_pointer =
ToRegister(instr->elements());
3108 bool key_is_constant = instr->key()->IsConstantOperand();
3109 int constant_key = 0;
3110 if (key_is_constant) {
3111 constant_key =
ToInteger32(LConstantOperand::cast(instr->key()));
3112 if (constant_key & 0xF0000000) {
3113 Abort(kArrayIndexConstantValueTooBig);
3119 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3121 : element_size_shift;
3122 int base_offset = instr->base_offset();
3128 int base_offset = instr->base_offset();
3130 if (key_is_constant) {
3132 constant_key << element_size_shift);
3134 if (shift_size < 0) {
3135 if (shift_size == -32) {
3148 __ cvt_d_s(result, result);
3153 Register result =
ToRegister(instr->result());
3155 key, external_pointer, key_is_constant, constant_key,
3156 element_size_shift, shift_size, base_offset);
3157 switch (elements_kind) {
3160 __ lb(result, mem_operand);
3166 __ lbu(result, mem_operand);
3170 __ lh(result, mem_operand);
3174 __ lhu(result, mem_operand);
3178 __ lw(result, mem_operand);
3182 __ lw(result, mem_operand);
3185 Operand(0x80000000));
3208 Register elements =
ToRegister(instr->elements());
3209 bool key_is_constant = instr->key()->IsConstantOperand();
3216 int base_offset = instr->base_offset();
3217 if (key_is_constant) {
3218 int constant_key =
ToInteger32(LConstantOperand::cast(instr->key()));
3219 if (constant_key & 0xF0000000) {
3220 Abort(kArrayIndexConstantValueTooBig);
3224 __ Daddu(scratch, elements, Operand(base_offset));
3226 if (!key_is_constant) {
3228 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3230 : element_size_shift;
3231 if (shift_size > 0) {
3232 __ dsll(at, key, shift_size);
3233 }
else if (shift_size == -32) {
3234 __ dsra32(at, key, 0);
3236 __ dsra(at, key, -shift_size);
3238 __ Daddu(scratch, scratch, at);
3243 if (instr->hydrogen()->RequiresHoleCheck()) {
3251 HLoadKeyed* hinstr = instr->hydrogen();
3252 Register elements =
ToRegister(instr->elements());
3253 Register result =
ToRegister(instr->result());
3255 Register store_base = scratch;
3256 int offset = instr->base_offset();
3258 if (instr->key()->IsConstantOperand()) {
3259 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3261 store_base = elements;
3268 if (instr->hydrogen()->key()->representation().IsSmi()) {
3270 __ daddu(scratch, elements, scratch);
3273 __ daddu(scratch, elements, scratch);
3277 Representation representation = hinstr->representation();
3280 DCHECK(!hinstr->RequiresHoleCheck());
3281 if (FLAG_debug_code) {
3293 __ Load(result,
MemOperand(store_base, offset), representation);
3296 if (hinstr->RequiresHoleCheck()) {
3298 __ SmiTst(result, scratch);
3299 DeoptimizeIf(
ne, instr,
"not a Smi", scratch, Operand(zero_reg));
3301 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3308 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3309 if (instr->is_typed_elements()) {
3311 }
else if (instr->hydrogen()->representation().IsDouble()) {
3321 bool key_is_constant,
3326 if (key_is_constant) {
3327 return MemOperand(base, (constant_key << element_size) + base_offset);
3330 if (base_offset == 0) {
3331 if (shift_size >= 0) {
3336 if (shift_size == -32) {
3346 if (shift_size >= 0) {
3351 if (shift_size == -32) {
3362 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3367 if (FLAG_vector_ics) {
3368 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3371 Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
3376 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3379 Register result =
ToRegister(instr->result());
3381 if (instr->hydrogen()->from_inlined()) {
3385 Label done, adapted;
3392 __ Movn(result,
fp, temp);
3393 __ Movz(result, scratch, temp);
3398 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3399 Register elem =
ToRegister(instr->elements());
3400 Register result =
ToRegister(instr->result());
3405 __ Daddu(result, zero_reg, Operand(
scope()->num_parameters()));
3406 __ Branch(&done,
eq,
fp, Operand(elem));
3412 __ SmiUntag(result);
3419 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3420 Register receiver =
ToRegister(instr->receiver());
3421 Register
function =
ToRegister(instr->function());
3422 Register result =
ToRegister(instr->result());
3428 Label global_object, result_in_receiver;
3430 if (!instr->hydrogen()->known_function()) {
3436 int32_t strict_mode_function_mask =
3442 __ And(at, at, Operand(strict_mode_function_mask));
3443 __ Branch(&result_in_receiver,
ne, at, Operand(zero_reg));
3446 __ And(at, at, Operand(native_mask));
3447 __ Branch(&result_in_receiver,
ne, at, Operand(zero_reg));
3451 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3452 __ Branch(&global_object,
eq, receiver, Operand(scratch));
3453 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3454 __ Branch(&global_object,
eq, receiver, Operand(scratch));
3457 __ SmiTst(receiver, scratch);
3460 __ GetObjectType(receiver, scratch, scratch);
3463 __ Branch(&result_in_receiver);
3465 __ bind(&global_object);
3472 if (result.is(receiver)) {
3473 __ bind(&result_in_receiver);
3476 __ Branch(&result_ok);
3477 __ bind(&result_in_receiver);
3478 __ mov(result, receiver);
3479 __ bind(&result_ok);
3484 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3485 Register receiver =
ToRegister(instr->receiver());
3486 Register
function =
ToRegister(instr->function());
3487 Register length =
ToRegister(instr->length());
3488 Register elements =
ToRegister(instr->elements());
3498 Operand(kArgumentsLimit));
3503 __ Move(receiver, length);
3514 __ Daddu(scratch, elements, scratch);
3517 __ Dsubu(length, length, Operand(1));
3522 DCHECK(instr->HasPointerMap());
3523 LPointerMap* pointers = instr->pointer_map();
3525 this, pointers, Safepoint::kLazyDeopt);
3528 ParameterCount actual(receiver);
3529 __ InvokeFunction(
function, actual,
CALL_FUNCTION, safepoint_generator);
3533 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3534 LOperand* argument = instr->value();
3535 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3536 Abort(kDoPushArgumentNotImplementedForDoubleType);
3539 __ push(argument_reg);
3544 void LCodeGen::DoDrop(LDrop* instr) {
3545 __ Drop(instr->count());
3549 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3550 Register result =
ToRegister(instr->result());
3555 void LCodeGen::DoContext(LContext* instr) {
3557 Register result =
ToRegister(instr->result());
3558 if (info()->IsOptimizing()) {
3567 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3569 __ li(
scratch0(), instr->hydrogen()->pairs());
3578 int formal_parameter_count,
3580 LInstruction* instr,
3582 bool dont_adapt_arguments =
3584 bool can_invoke_directly =
3585 dont_adapt_arguments || formal_parameter_count == arity;
3587 LPointerMap* pointers = instr->pointer_map();
3589 if (can_invoke_directly) {
3591 __ li(a1,
function);
3599 if (dont_adapt_arguments) {
3600 __ li(a0, Operand(arity));
3611 ParameterCount count(arity);
3612 ParameterCount expected(formal_parameter_count);
3613 __ InvokeFunction(
function, expected, count,
CALL_FUNCTION, generator);
3622 Register result =
ToRegister(instr->result());
3627 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3628 DeoptimizeIf(
ne, instr,
"not a heap number", scratch, Operand(at));
3636 __ Move(result, input);
3638 __ Branch(&done,
eq, at, Operand(zero_reg));
3643 PushSafepointRegistersScope
scope(
this);
3647 Register tmp1 = input.is(a1) ? a0 : a1;
3648 Register tmp2 = input.is(a2) ? a0 : a2;
3649 Register tmp3 = input.is(a3) ? a0 : a3;
3650 Register tmp4 = input.is(a4) ? a0 : a4;
3654 Label allocated, slow;
3655 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3656 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3657 __ Branch(&allocated);
3668 __ LoadFromSafepointRegisterSlot(input, input);
3671 __ bind(&allocated);
3679 __ StoreToSafepointRegisterSlot(tmp1, result);
3688 Register result =
ToRegister(instr->result());
3692 __ mov(result, input);
3693 __ dsubu(result, zero_reg, input);
3700 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3704 DeferredMathAbsTaggedHeapNumber(
LCodeGen* codegen, LMathAbs* instr)
3707 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3709 virtual LInstruction* instr()
OVERRIDE {
return instr_; }
3714 Representation r = instr->hydrogen()->value()->representation();
3718 __ abs_d(result, input);
3719 }
else if (r.IsSmiOrInteger32()) {
3723 DeferredMathAbsTaggedHeapNumber* deferred =
3724 new(zone()) DeferredMathAbsTaggedHeapNumber(
this, instr);
3727 __ JumpIfNotSmi(input, deferred->entry());
3730 __ bind(deferred->exit());
3735 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3737 Register result =
ToRegister(instr->result());
3739 Register except_flag =
ToRegister(instr->temp());
3755 __ Branch(&done,
ne, result, Operand(zero_reg));
3764 void LCodeGen::DoMathRound(LMathRound* instr) {
3766 Register result =
ToRegister(instr->result());
3769 Label done, check_sign_on_zero;
3772 __ mfhc1(result, input);
3781 __ mov(result, zero_reg);
3783 __ Branch(&check_sign_on_zero);
3804 __ dsll32(result, result, 0);
3805 __ dsrl32(result, result, 0);
3806 __ Xor(result, result, Operand(scratch));
3809 DeoptimizeIf(
lt, instr,
"minus zero", result, Operand(zero_reg));
3814 __ Branch(&skip2,
ge, result, Operand(zero_reg));
3815 __ mov(result, zero_reg);
3820 Register except_flag = scratch;
3833 __ Branch(&done,
ne, result, Operand(zero_reg));
3834 __ bind(&check_sign_on_zero);
3835 __ mfhc1(scratch, input);
3837 DeoptimizeIf(
ne, instr,
"minus zero", scratch, Operand(zero_reg));
3843 void LCodeGen::DoMathFround(LMathFround* instr) {
3846 __ cvt_s_d(result, input);
3847 __ cvt_d_s(result, result);
3851 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3854 __ sqrt_d(result, input);
3858 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3863 DCHECK(!input.is(result));
3873 __ neg_d(result, temp);
3877 __ sqrt_d(result, result);
3882 void LCodeGen::DoPower(LPower* instr) {
3883 Representation exponent_type = instr->hydrogen()->right()->representation();
3887 DCHECK(!instr->right()->IsDoubleRegister() ||
3889 DCHECK(!instr->right()->IsRegister() ||
3894 if (exponent_type.IsSmi()) {
3897 }
else if (exponent_type.IsTagged()) {
3899 __ JumpIfSmi(tagged_exponent, &no_deopt);
3900 DCHECK(!a7.is(tagged_exponent));
3902 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3907 }
else if (exponent_type.IsInteger32()) {
3911 DCHECK(exponent_type.IsDouble());
3918 void LCodeGen::DoMathExp(LMathExp* instr) {
3927 masm(), input, result, double_scratch1, double_scratch2,
3932 void LCodeGen::DoMathLog(LMathLog* instr) {
3935 __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
3941 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3943 Register result =
ToRegister(instr->result());
3944 __ Clz(result, input);
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  DCHECK(ToRegister(instr->function()).is(a1));
  DCHECK(instr->HasPointerMap());

  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(a1, count, CALL_FUNCTION, generator);
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(), instr, A1_CONTAINS_TARGET);
  }
}

void LCodeGen::DoTailCallThroughMegamorphicCache(
    LTailCallThroughMegamorphicCache* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register name = ToRegister(instr->name());
  DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
  DCHECK(name.is(LoadDescriptor::NameRegister()));

  Register scratch = a3;
  Register extra = a4;
  Register extra2 = a5;
  Register extra3 = a6;

  // ...
  // The probe will tail call to a handler if found.
  isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
                                         must_teardown_frame, receiver, name,
                                         scratch, extra, extra2, extra3);
  // ... (tail call to the miss handler if the probe falls through)
}

void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  DCHECK(ToRegister(instr->result()).is(v0));

  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

  if (instr->target()->IsConstantOperand()) {
    LConstantOperand* target = LConstantOperand::cast(instr->target());
    // ... (call the constant Code target through the SafepointGenerator)
  } else {
    DCHECK(instr->target()->IsRegister());
    Register target = ToRegister(instr->target());
    generator.BeforeCall(__ CallSize(target));
    // The target holds a Code object; jump past its header to the entry.
    __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
    __ Call(target);
  }
  generator.AfterCall();
}

void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
  DCHECK(ToRegister(instr->function()).is(a1));
  DCHECK(ToRegister(instr->result()).is(v0));

  if (instr->hydrogen()->pass_argument_count()) {
    __ li(a0, Operand(instr->arity()));
  }
  // ...
}

void LCodeGen::DoCallFunction(LCallFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->function()).is(a1));
  DCHECK(ToRegister(instr->result()).is(v0));

  int arity = instr->arity();
  CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoCallNew(LCallNew* instr) {
  DCHECK(ToRegister(instr->constructor()).is(a1));
  DCHECK(ToRegister(instr->result()).is(v0));

  __ li(a0, Operand(instr->arity()));
  // No cell in a2 for construct type feedback in optimized code.
  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}

void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  __ li(a0, Operand(instr->arity()));
  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else if (instr->arity() == 1) {
    Label done, packed_case;
    if (IsFastPackedElementsKind(kind)) {
      // A non-zero length argument forces a holey elements kind.
      __ ld(a5, MemOperand(sp, 0));
      __ Branch(&packed_case, eq, a5, Operand(zero_reg));
      ArraySingleArgumentConstructorStub stub(isolate(),
                                              GetHoleyElementsKind(kind),
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      __ jmp(&done);
    }
    __ bind(&packed_case);
    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}

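// Stub selection in DoCallNewArray is arity-based: zero arguments always
// builds an empty array, so the statically chosen packed kind is safe;
// exactly one argument is ambiguous, because `new Array(n)` with n != 0 must
// produce a holey array, hence the runtime comparison of the length argument
// against zero and the holey-kind stub on the nonzero path; two or more
// arguments use the N-arguments stub with the recorded elements kind.
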
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}

void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  // Skip past the Code object header to get the entry address.
  __ Daddu(code_object, code_object,
           Operand(Code::kHeaderSize - kHeapObjectTag));
  __ sd(code_object,
        FieldMemOperand(function, JSFunction::kCodeEntryOffset));
}

void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
    __ Daddu(result, base, Operand(ToInteger32(offset)));
  } else {
    Register offset = ToRegister(instr->offset());
    __ Daddu(result, base, offset);
  }
}

void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  Register scratch = scratch0();
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();
  if (access.IsExternalMemory()) {
    Register value = ToRegister(instr->value());
    MemOperand operand = MemOperand(object, offset);
    __ Store(value, operand, representation);
    return;
  }

  __ AssertNotSmi(object);

  DCHECK(!representation.IsSmi() ||
         !instr->value()->IsConstantOperand() ||
         IsSmi(LConstantOperand::cast(instr->value())));
  if (representation.IsDouble()) {
    DCHECK(access.IsInobject());
    DCHECK(!instr->hydrogen()->has_transition());
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    DoubleRegister value = ToDoubleRegister(instr->value());
    __ sdc1(value, FieldMemOperand(object, offset));
    return;
  }

  if (instr->hydrogen()->has_transition()) {
    Handle<Map> transition = instr->hydrogen()->transition_map();
    AddDeprecationDependency(transition);
    // ... (store the new map into the object)
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      // Update the write barrier for the map field.
      __ RecordWriteForMap(object,
                           // ... (new map register, temp, RA state)
                           kSaveFPRegs);
    }
  }

  // Do the store.
  Register destination = object;
  if (!access.IsInobject()) {
    // Out-of-object property: store into the properties array instead.
    destination = scratch;
    __ ld(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
  }
  Register value = ToRegister(instr->value());
  if (representation.IsSmi() &&
      instr->hydrogen()->value()->representation().IsInteger32()) {
    // Store the int32 directly into the upper half of the Smi field.
    if (FLAG_debug_code) {
      // ... (load the old field value into scratch2)
      __ AssertSmi(scratch2);
    }
    offset += kPointerSize / 2;
    representation = Representation::Integer32();
  }
  // ...
  MemOperand operand = FieldMemOperand(destination, offset);
  __ Store(value, operand, representation);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    // Update the write barrier for the stored field.
    __ RecordWriteField(destination,
                        offset,
                        value,
                        // ... (scratch, RA state, FP mode)
                        EMIT_REMEMBERED_SET,
                        instr->hydrogen()->SmiCheckForWriteBarrier(),
                        instr->hydrogen()->PointersToHereCheckForValue());
  }
}

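// RecordWriteField above is the generational write barrier: after a tagged
// pointer is stored into the object, it informs the incremental marker and,
// when the stored value may live in new space, records the slot in the
// remembered set. The SmiCheck argument lets the barrier skip values that
// are statically known to be Smis (Smis are not heap pointers), and
// PointersToHereCheckForValue skips it when the value cannot point into the
// young generation.
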
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  // ... (load name/value into the IC's fixed registers and call the IC)
}

void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
  Operand operand((int64_t)0);
  Register reg;
  if (instr->index()->IsConstantOperand()) {
    // Constant index: compare the length against the constant, with the
    // condition commuted.
    // ...
  }
  // ...
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    Label done;
    __ Branch(&done, NegateCondition(cc), reg, operand);
    __ stop("eliminated bounds check failed");
    __ bind(&done);
  } else {
    DeoptimizeIf(cc, instr, "out of bounds", reg, operand);
  }
}

void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
      : element_size_shift;
  int base_offset = instr->base_offset();

  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
      elements_kind == EXTERNAL_FLOAT64_ELEMENTS /* ... */) {
    Register address = scratch0();
    if (key_is_constant) {
      if (constant_key != 0) {
        __ Daddu(address, external_pointer,
                 Operand(constant_key << element_size_shift));
      } else {
        address = external_pointer;
      }
    } else {
      if (shift_size < 0) {
        // Smi keys carry an extra left shift of 32; compensate with an
        // arithmetic right shift.
        if (shift_size == -32) {
          __ dsra32(address, key, 0);
        } else {
          __ dsra(address, key, -shift_size);
        }
      } else {
        __ dsll(address, key, shift_size);
      }
      __ Daddu(address, external_pointer, address);
    }
    // ... (swc1/sdc1 stores for the float32/float64 kinds)
  } else {
    Register value(ToRegister(instr->value()));
    MemOperand mem_operand = PrepareKeyedOperand(
        key, external_pointer, key_is_constant, constant_key,
        element_size_shift, shift_size, base_offset);
    switch (elements_kind) {
      case EXTERNAL_INT8_ELEMENTS:
      case EXTERNAL_UINT8_ELEMENTS:
      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
      // ...
        __ sb(value, mem_operand);
        break;
      case EXTERNAL_INT16_ELEMENTS:
      case EXTERNAL_UINT16_ELEMENTS:
      // ...
        __ sh(value, mem_operand);
        break;
      case EXTERNAL_INT32_ELEMENTS:
      case EXTERNAL_UINT32_ELEMENTS:
      // ...
        __ sw(value, mem_operand);
        break;
      // ... (remaining kinds are unreachable here)
    }
  }
}

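// Address arithmetic used by the keyed stores: the element offset is
// key << element_size_shift. When the key is still a tagged Smi it already
// carries a left shift of kSmiTagSize + kSmiShiftSize (32 on this port), so
// the effective shift is element_size_shift - 32 and can be negative, which
// is why the negative shift_size cases compensate with arithmetic right
// shifts (dsra/dsra32). In pseudo-arithmetic (illustration only):
//
//   untagged key:  addr = base + (key << element_size_shift)
//   smi key:       addr = base + (key >> (32 - element_size_shift))
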
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  DoubleRegister value = ToDoubleRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int base_offset = instr->base_offset();
  Label not_nan, done;

  // Calculate the effective address of the slot in the array to store the
  // double value.
  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  if (key_is_constant) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    __ Daddu(scratch, elements,
             Operand((constant_key << element_size_shift) + base_offset));
  } else {
    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
        ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
        : element_size_shift;
    __ Daddu(scratch, elements, Operand(base_offset));
    DCHECK((shift_size == 3) || (shift_size == -29));
    if (shift_size == 3) {
      __ dsll(at, ToRegister(instr->key()), 3);
    } else if (shift_size == -29) {
      __ dsra(at, ToRegister(instr->key()), 29);
    }
    __ Daddu(scratch, scratch, at);
  }

  if (instr->NeedsCanonicalization()) {
    Label is_nan;
    // Check for NaN: all NaNs must be canonicalized before storing.
    __ BranchF(NULL, &is_nan, eq, value, value);
    __ Branch(&not_nan);

    __ bind(&is_nan);
    __ LoadRoot(at, Heap::kNanValueRootIndex);
    // ... (store the canonical NaN value and jump to done)
  }

  __ bind(&not_nan);
  // ... (sdc1 of 'value' into the computed slot)
  __ bind(&done);
}

void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
                                            : no_reg;
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = instr->base_offset();

  // Do the store.
  if (instr->key()->IsConstantOperand()) {
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset += ToInteger32(const_operand) * kPointerSize;
    store_base = elements;
  } else {
    // The key may be tagged (after bounds-check elimination) or untagged.
    if (instr->hydrogen()->key()->representation().IsSmi()) {
      __ SmiScale(scratch, key, kPointerSizeLog2);
      __ daddu(store_base, elements, scratch);
    } else {
      __ dsll(scratch, key, kPointerSizeLog2);
      __ daddu(store_base, elements, scratch);
    }
  }

  Representation representation = instr->hydrogen()->value()->representation();
  if (representation.IsInteger32()) {
    // Store the int32 directly into the upper half of the Smi slot.
    if (FLAG_debug_code) {
      // ... (assert that the slot currently holds a Smi)
    }
    offset += kPointerSize / 2;
    representation = Representation::Integer32();
  }

  __ Store(value, MemOperand(store_base, offset), representation);

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
        ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute the address of the modified element into 'key'.
    __ Daddu(key, store_base, Operand(offset));
    __ RecordWrite(elements,
                   key,
                   value,
                   GetRAState(),
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed,
                   instr->hydrogen()->PointersToHereCheckForValue());
  }
}

void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  // By cases: typed (external) arrays, fast doubles, and fast elements.
  if (instr->is_typed_elements()) {
    DoStoreKeyedExternalArray(instr);
  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
  } else {
    DoStoreKeyedFixedArray(instr);
  }
}

void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  // ...
  Handle<Code> ic =
      CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());
  Register scratch = scratch0();

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  __ ld(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
  __ Branch(&not_applicable, ne, scratch, Operand(from_map));

  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ li(new_map_reg, Operand(to_map));
    __ sd(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
    // Write barrier for the map change.
    __ RecordWriteForMap(object_reg,
                         new_map_reg,
                         scratch,
                         GetRAState(),
                         kDontSaveFPRegs);
  } else {
    DCHECK(object_reg.is(a0));
    PushSafepointRegistersScope scope(this);
    __ li(a1, Operand(to_map));
    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
  __ bind(&not_applicable);
}

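// DoTransitionElementsKind has two strategies: a simple map change (for
// example FAST_SMI_ELEMENTS to FAST_ELEMENTS) only rewrites the map word,
// because the backing store layout is unchanged, and so needs just the map
// write barrier; any transition that changes element representation must
// copy the backing store, so it calls TransitionElementsKindStub under a
// safepoint-registers scope with a lazy-deopt safepoint recorded.
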
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp = ToRegister(instr->temp());
  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
                                     ne, &no_memento_found);
  // ... (deoptimize when an allocation memento is found)
  __ bind(&no_memento_found);
}

void LCodeGen::DoStringAdd(LStringAdd* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  StringAddStub stub(isolate(),
                     instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt FINAL : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredStringCharCodeAt(instr_);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);
  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}

void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // The result register is in the pointer map, so give it a valid tagged
  // value before the runtime call.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this);
  __ push(string);
  // Push the index as a Smi; the checks in DoStringCharCodeAt make this safe.
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ Daddu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
    __ push(scratch);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  // ... (call the runtime and untag the Smi result)
  __ StoreToSafepointRegisterSlot(v0, result);
}

void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode FINAL : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  DCHECK(!char_code.is(result));

  // Fast path: look the character up in the single-character string cache.
  __ Branch(deferred->entry(), hi,
            char_code, Operand(String::kMaxOneByteCharCode));
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ dsll(scratch, char_code, kPointerSizeLog2);
  __ Daddu(result, result, scratch);
  __ ld(result, FieldMemOperand(result, FixedArray::kHeaderSize));
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  // A cache miss (undefined entry) goes to the deferred runtime call.
  __ Branch(deferred->entry(), eq, result, Operand(scratch));
  __ bind(deferred->exit());
}

void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // Give the result register a valid tagged value for the pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(v0, result);
}

void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  DCHECK(output->IsDoubleRegister());

  FPURegister single_scratch = double_scratch0().low();
  if (input->IsStackSlot()) {
    Register scratch = scratch0();
    __ ld(scratch, ToMemOperand(input));
    __ mtc1(scratch, single_scratch);
  } else {
    __ mtc1(ToRegister(input), single_scratch);
  }
  __ cvt_d_w(ToDoubleRegister(output), single_scratch);
}

void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();
  __ Cvt_d_uw(ToDoubleRegister(output), ToRegister(input), f22);
}

void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU FINAL : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredNumberTagIU(instr_,
                                       instr_->value(),
                                       instr_->temp1(),
                                       instr_->temp2(),
                                       UNSIGNED_INT32);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LNumberTagU* instr_;
  };

  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
  // Values above Smi::kMaxValue need a HeapNumber.
  __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue));
  __ SmiTag(result, input);
  __ bind(deferred->exit());
}

void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
                                     LOperand* value,
                                     LOperand* temp1,
                                     LOperand* temp2,
                                     IntegerSignedness signedness) {
  Label done, slow;
  Register src = ToRegister(value);
  Register dst = ToRegister(instr->result());
  Register tmp1 = scratch0();
  Register tmp2 = ToRegister(temp1);
  Register tmp3 = ToRegister(temp2);
  DoubleRegister dbl_scratch = double_scratch0();

  if (signedness == SIGNED_INT32) {
    // Tagging overflowed; recover the original bits, then convert to double.
    if (dst.is(src)) {
      __ SmiUntag(src, dst);
      __ Xor(src, src, Operand(0x80000000));
    }
    __ mtc1(src, dbl_scratch);
    __ cvt_d_w(dbl_scratch, dbl_scratch);
  } else {
    __ mtc1(src, dbl_scratch);
    __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
  }

  if (FLAG_inline_new) {
    __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, TAG_RESULT);
    __ Branch(&done);
  }

  // Slow case: call the runtime system to do the number allocation.
  __ bind(&slow);
  {
    // The result register is in the pointer map; give it a valid tagged
    // value before the call.
    __ mov(dst, zero_reg);
    PushSafepointRegistersScope scope(this);
    // ...
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(v0, dst);
  }

  // Done: store the double value into the allocated HeapNumber.
  __ bind(&done);
  // ...
}

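// DoDeferredNumberTagIU boxes an int32/uint32 that missed the Smi fast path:
// it converts the value to a double in an FPU register, attempts an inline
// new-space HeapNumber allocation, and falls back to
// Runtime::kAllocateHeapNumber. The signed variant first undoes the failed
// Smi tagging (SmiUntag plus the Xor with 0x80000000 restores the original
// bits after a tag overflow); the unsigned variant converts with Cvt_d_uw
// because the value may exceed INT32_MAX.
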
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD FINAL : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredNumberTagD(instr_);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  Register scratch = scratch0();
  Register reg = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
                          TAG_RESULT);
  } else {
    __ Branch(deferred->entry());
  }
  __ bind(deferred->exit());
  // ... (store the double value into the new HeapNumber)
}

void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // The result register is in the pointer map; give it a valid tagged value.
  Register reg = ToRegister(instr->result());
  __ mov(reg, zero_reg);

  PushSafepointRegistersScope scope(this);
  // ...
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(v0, reg);
}

void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
    // A uint32 with the sign bit set does not fit in a Smi payload.
    __ And(at, input, Operand(0x80000000));
    DeoptimizeIf(ne, instr, "overflow", at, Operand(zero_reg));
  }
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      !hchange->value()->CheckFlag(HValue::kUint32)) {
    __ SmiTagCheckOverflow(output, input, at);
    DeoptimizeIf(lt, instr, "overflow", at, Operand(zero_reg));
  } else {
    __ SmiTag(output, input);
  }
}

void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  if (instr->needs_check()) {
    // If the input is a HeapObject, the scratch value will be non-zero.
    __ And(scratch, input, Operand(kHeapObjectTag));
    __ SmiUntag(result, input);
    DeoptimizeIf(ne, instr, "not a Smi", scratch, Operand(zero_reg));
  } else {
    __ SmiUntag(result, input);
  }
}

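// Smi representation on this 64-bit port keeps the 32-bit payload in the
// upper word (kSmiTagSize + kSmiShiftSize == 32), so tagging an int32 is a
// single shift that cannot overflow; only uint32 inputs above INT32_MAX need
// the explicit sign-bit deopt seen in DoSmiTag. Sketch of the scheme
// (illustration only):
//
//   tag:    smi   = (int64_t)value << 32;   // low 32 bits hold the tag
//   untag:  value = (int32_t)(smi >> 32);
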
void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
                                DoubleRegister result_reg,
                                NumberUntagDMode mode) {
  bool can_convert_undefined_to_nan =
      instr->hydrogen()->can_convert_undefined_to_nan();
  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();

  Register scratch = scratch0();
  Label convert, load_smi, done;
  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
    // Heap number map check.
    __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    if (can_convert_undefined_to_nan) {
      __ Branch(&convert, ne, scratch, Operand(at));
    } else {
      DeoptimizeIf(ne, instr, "not a heap number", scratch, Operand(at));
    }
    // Load heap number.
    __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
    if (deoptimize_on_minus_zero) {
      __ mfc1(at, result_reg);
      __ Branch(&done, ne, at, Operand(zero_reg));
      __ mfhc1(scratch, result_reg);
      DeoptimizeIf(eq, instr, "minus zero", scratch,
                   Operand(HeapNumber::kSignMask));
    }
    __ Branch(&done);
    if (can_convert_undefined_to_nan) {
      __ bind(&convert);
      // Convert undefined to NaN.
      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
      DeoptimizeIf(ne, instr, "not a heap number/undefined", input_reg,
                   Operand(at));
      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
      __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
      __ Branch(&done);
    }
  } else {
    __ SmiUntag(scratch, input_reg);
    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
  }
  // Smi to double conversion.
  __ bind(&load_smi);
  __ mtc1(scratch, result_reg);
  __ cvt_d_w(result_reg, result_reg);
  __ bind(&done);
}

void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Register input_reg = ToRegister(instr->value());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->temp());
  Label done;

  // Heap number map check.
  __ ld(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);

  if (instr->truncating()) {
    // Truncating conversion, as used by the JS bitwise operations.
    Label no_heap_number, check_bools, check_false;
    // ...
    __ mov(scratch2, input_reg);
    __ TruncateHeapNumberToI(input_reg, scratch2);
    __ Branch(&done);

    // Oddballs: undefined and false convert to 0, true converts to 1.
    __ bind(&no_heap_number);
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(&check_bools, ne, input_reg, Operand(at));
    __ Branch(USE_DELAY_SLOT, &done);
    __ mov(input_reg, zero_reg);  // In delay slot.

    __ bind(&check_bools);
    __ LoadRoot(at, Heap::kTrueValueRootIndex);
    __ Branch(&check_false, ne, scratch2, Operand(at));
    __ Branch(USE_DELAY_SLOT, &done);
    __ li(input_reg, Operand(1));  // In delay slot.

    __ bind(&check_false);
    __ LoadRoot(at, Heap::kFalseValueRootIndex);
    DeoptimizeIf(ne, instr, "not a heap number/undefined/true/false", scratch2,
                 Operand(at));
    __ Branch(USE_DELAY_SLOT, &done);
    __ mov(input_reg, zero_reg);  // In delay slot.
  } else {
    DeoptimizeIf(ne, instr, "not a heap number", scratch1, Operand(at));
    Register except_flag = scratch2;
    // ... (truncate with FPU flag checks; deopt on lost precision or NaN)
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ Branch(&done, ne, input_reg, Operand(zero_reg));
      // ... (deopt if the input was -0)
    }
  }
  __ bind(&done);
}

void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI FINAL : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredTaggedToI(instr_);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  DCHECK(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(input_reg);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

    // Let the deferred code handle the HeapObject case.
    __ JumpIfNotSmi(input_reg, deferred->entry());

    // Smi to int32 conversion.
    __ SmiUntag(input_reg);
    __ bind(deferred->exit());
  }
}

void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  DoubleRegister result_reg = ToDoubleRegister(result);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagD(instr, input_reg, result_reg, mode);
}

void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  Register result_reg = ToRegister(instr->result());
  DoubleRegister double_input = ToDoubleRegister(instr->value());

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    // ... (truncate with FPU flag checks; deopt on lost precision or NaN)
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      // Only a zero result can hide a -0 input.
      __ Branch(&done, ne, result_reg, Operand(zero_reg));
      // ... (deopt on minus zero)
      __ bind(&done);
    }
  }
}

void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  Register result_reg = ToRegister(instr->result());
  DoubleRegister double_input = ToDoubleRegister(instr->value());

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    // ... (same precision and -0 checks as DoDoubleToI)
    Label done;
    __ Branch(&done, ne, result_reg, Operand(zero_reg));
    // ...
    __ bind(&done);
  }
  __ SmiTag(result_reg, result_reg);
}

void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ SmiTst(ToRegister(input), at);
  DeoptimizeIf(ne, instr, "not a Smi", at, Operand(zero_reg));
}

void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    LOperand* input = instr->value();
    __ SmiTst(ToRegister(input), at);
    DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
  }
}

void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ GetObjectType(input, scratch, scratch);

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    // If there is only one type in the interval, check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr, "wrong instance type", scratch, Operand(first));
    } else {
      DeoptimizeIf(lo, instr, "wrong instance type", scratch, Operand(first));
      // Omit the upper-bound check when last is LAST_TYPE.
      if (last != LAST_TYPE) {
        DeoptimizeIf(hi, instr, "wrong instance type", scratch, Operand(last));
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (base::bits::IsPowerOfTwo32(mask)) {
      __ And(at, scratch, mask);
      DeoptimizeIf(tag == 0 ? ne : eq, instr, "wrong instance type",
                   at, Operand(zero_reg));
    } else {
      __ And(scratch, scratch, Operand(mask));
      DeoptimizeIf(ne, instr, "wrong instance type", scratch, Operand(tag));
    }
  }
}

void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  if (isolate()->heap()->InNewSpace(*object)) {
    // The object may move; compare through a cell instead.
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ li(at, Operand(Handle<Object>(cell)));
    __ ld(at, FieldMemOperand(at, Cell::kValueOffset));
    DeoptimizeIf(ne, instr, "value mismatch", reg, Operand(at));
  } else {
    DeoptimizeIf(ne, instr, "value mismatch", reg, Operand(object));
  }
}

void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this);
    __ push(object);
    __ mov(cp, zero_reg);
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(v0, scratch0());
  }
  // A Smi result means the migration failed.
  __ SmiTst(scratch0(), at);
  DeoptimizeIf(eq, instr, "instance migration failed", at, Operand(zero_reg));
}

void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps FINAL : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  Register map_reg = scratch0();
  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  Register reg = ToRegister(input);
  __ ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
  }
  Handle<Map> map = maps->at(maps->size() - 1).handle();
  // For the last map, branch to deferred migration or deoptimize directly.
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ Branch(deferred->entry(), ne, map_reg, Operand(map));
  } else {
    DeoptimizeIf(ne, instr, "wrong map", map_reg, Operand(map));
  }

  __ bind(&success);
}

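// DoCheckMaps compares the object's map against every map in the set,
// jumping to &success on the first match and deopting (or entering the
// migration deferred code) after the last mismatch. When a migration target
// exists, the deferred path calls Runtime::kTryMigrateInstance and loops
// back to check_maps(), so a successfully migrated object re-runs the whole
// comparison sequence instead of deoptimizing.
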
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
}

void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  Register unclamped_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampUint8(result_reg, unclamped_reg);
}

void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  Register scratch = scratch0();
  Label is_smi, done, heap_number;

  // Smi fast path.
  __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);

  // Check for heap number.
  __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));

  // Anything but a heap number or undefined deopts; undefined clamps to 0.
  DeoptimizeIf(ne, instr, "not a heap number/undefined", input_reg,
               Operand(factory()->undefined_value()));
  __ mov(result_reg, zero_reg);
  __ Branch(&done);

  // Heap number: clamp the double value.
  __ bind(&heap_number);
  // ... (load the double and clamp it with ClampDoubleToUint8)
  __ Branch(&done);

  __ bind(&is_smi);
  __ ClampUint8(result_reg, scratch);

  __ bind(&done);
}

void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->value());
  Register result_reg = ToRegister(instr->result());
  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
    __ FmoveHigh(result_reg, value_reg);
  } else {
    __ FmoveLow(result_reg, value_reg);
  }
}

void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
  Register hi_reg = ToRegister(instr->hi());
  Register lo_reg = ToRegister(instr->lo());
  DoubleRegister result_reg = ToDoubleRegister(instr->result());
  __ Move(result_reg, lo_reg, hi_reg);
}

void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate FINAL : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredAllocate(instr_);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
  }
  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size <= Page::kMaxRegularHeapObjectSize) {
      __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
    } else {
      __ jmp(deferred->entry());
    }
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    // Fill the object with one-pointer filler values to keep the heap
    // iterable while the caller initializes it.
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ li(scratch, Operand(size - kHeapObjectTag));
    } else {
      __ Dsubu(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
    }
    __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    Label loop;
    __ bind(&loop);
    __ Dsubu(scratch, scratch, Operand(kPointerSize));
    __ Daddu(at, result, Operand(scratch));
    __ sd(scratch2, MemOperand(at));
    __ Branch(&loop, ge, scratch, Operand(zero_reg));
  }
}

void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // The result register is in the pointer map; give it a valid tagged value.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    DCHECK(!size.is(result));
    __ SmiTag(size);
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size >= 0 && size <= Smi::kMaxValue) {
      __ li(v0, Operand(Smi::FromInt(size)));
      __ push(v0);
    } else {
      // Sizes that do not fit in a Smi should have been rejected earlier.
      __ stop("invalid allocation size");
    }
  }

  // Encode double alignment and the target space into the Smi flags argument.
  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ li(v0, Operand(Smi::FromInt(flags)));
  __ push(v0);

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(v0, result);
}

void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  DCHECK(ToRegister(instr->value()).is(a0));
  DCHECK(ToRegister(instr->result()).is(v0));
  __ push(a0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}

void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  Label materialized;
  // a7 holds the literals array, a1 the regexp literal.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ li(a7, instr->hydrogen()->literals());
  __ ld(a1, FieldMemOperand(a7, literal_offset));
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  __ Branch(&materialized, ne, a1, Operand(at));

  // Create the regexp literal by calling the runtime; the result is in v0.
  __ li(a6, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ li(a5, Operand(instr->hydrogen()->pattern()));
  __ li(a4, Operand(instr->hydrogen()->flags()));
  __ Push(a7, a6, a5, a4);
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mov(a1, v0);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;
  __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ li(a0, Operand(Smi::FromInt(size)));
  __ Push(a1, a0);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(a1);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // ...
}

void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  // Use the fast-case closure allocation stub that allocates in new space
  // for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
                            instr->hydrogen()->kind());
    __ li(a2, Operand(instr->hydrogen()->shared_info()));
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ li(a2, Operand(instr->hydrogen()->shared_info()));
    __ li(a1, Operand(pretenure ? factory()->true_value()
                                : factory()->false_value()));
    __ Push(cp, a2, a1);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}

void LCodeGen::DoTypeof(LTypeof* instr) {
  Register input = ToRegister(instr->value());
  __ push(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}

void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Register cmp1 = no_reg;
  Operand cmp2 = Operand(no_reg);
  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
                                                  instr->FalseLabel(chunk_),
                                                  input,
                                                  instr->type_literal(),
                                                  &cmp1,
                                                  &cmp2);

  DCHECK(cmp1.is_valid());
  DCHECK(!cmp2.is_reg() || cmp2.rm().is_valid());

  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition, cmp1, cmp2);
  }
}

Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
                                 Register input, Handle<String> type_name,
                                 Register* cmp1, Operand* cmp2) {
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(input, true_label);
    __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    *cmp1 = input;
    *cmp2 = Operand(at);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, input, scratch);
    __ Branch(USE_DELAY_SLOT, false_label, ge, scratch,
              Operand(FIRST_NONSTRING_TYPE));
    // Undetectable strings report "undefined", so check the bit field.
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, input, scratch);
    *cmp1 = scratch;
    *cmp2 = Operand(SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ LoadRoot(at, Heap::kTrueValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    __ LoadRoot(at, Heap::kFalseValueRootIndex);
    *cmp1 = at;
    *cmp2 = Operand(input);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->undefined_string())) {
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects, which also report "undefined".
    __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);
    final_branch_condition = ne;

  } else if (String::Equals(type_name, factory->function_string())) {
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, scratch, input);
    __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
    *cmp1 = input;
    *cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->object_string())) {
    __ JumpIfSmi(input, false_label);
    __ LoadRoot(at, Heap::kNullValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    Register map = input;
    __ GetObjectType(input, map, scratch);
    __ Branch(false_label, lt, scratch,
              Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ Branch(USE_DELAY_SLOT, false_label, gt, scratch,
              Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
    // 'map' is still valid; use it to check for undetectable objects.
    __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

  } else {
    // Unknown type literal: always false. Set valid operands to satisfy
    // the caller's assertions.
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);
    __ Branch(false_label);
  }

  return final_branch_condition;
}

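// EmitTypeofIs deliberately does not emit the final branch itself: it
// performs the preparatory checks, leaves the two operands of the deciding
// comparison in *cmp1/*cmp2, and returns the condition code. The caller,
// DoTypeofIsAndBranch, feeds these into EmitBranch, which knows the block
// layout and can turn either the true or the false target into a
// fall-through.
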
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->temp());
  Register temp2 = scratch0();
  DCHECK(!temp1.is(temp2));

  // Get the frame pointer for the calling frame, skipping an arguments
  // adaptor frame if one is present.
  Label check_frame_marker;
  __ ld(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ld(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ Branch(&check_frame_marker, ne, temp2,
            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ ld(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ ld(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
  EmitBranch(instr, eq, temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}

void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure there is enough space after the previous lazy-bailout point
    // for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= Assembler::kInstrSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}

void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}

void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // Stubs expect all deopts to be lazy (a return address is needed), so an
  // eager request becomes lazy when compiling a stub.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }
  DeoptimizeIf(al, instr, type, instr->hydrogen()->reason(), zero_reg,
               Operand(zero_reg));
}

void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to emit.
}

void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to emit.
}

void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}

void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck FINAL : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredStackCheck(instr_);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack checks, so lazy
  // deoptimization has to be prepared here explicitly.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(&done, hs, sp, Operand(at));
    DCHECK(instr->context()->IsRegister());
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET, instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform the stack overflow check before the backwards jump.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // The deoptimization index is recorded when the call and safepoint are
    // emitted in the deferred code.
  }
}

void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures the environment is properly
  // registered for deoptimization.
  LEnvironment* environment = instr->environment();
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}

void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Register result = ToRegister(instr->result());
  Register object = ToRegister(instr->object());
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  DeoptimizeIf(eq, instr, "undefined", object, Operand(at));

  Register null_value = a5;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  DeoptimizeIf(eq, instr, "null", object, Operand(null_value));

  // ... (deopt on Smis and non-JS objects)
  __ GetObjectType(object, a1, a1);
  // ...

  Label use_cache, call_runtime;
  __ CheckEnumCache(null_value, &call_runtime);

  __ ld(result, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Branch(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(object);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  // The runtime must have returned a map (checked against the meta map).
  __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kMetaMapRootIndex);
  DeoptimizeIf(ne, instr, "wrong map", a1, Operand(at));
  __ bind(&use_cache);
}

void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
  // No enum cache: use the empty fixed array.
  __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ Branch(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  // ... (load the enum cache and deopt if it is empty)
  __ bind(&done);
}

void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(ne, instr, "wrong map", map, Operand(scratch0()));
}

void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object, index);
  __ mov(cp, zero_reg);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(v0, result);
}

void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble FINAL : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register result,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) { }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, result, object, index);

  Label out_of_object, done;

  // The low bit of the index flags a mutable-double (boxed) field, which the
  // deferred code loads through the runtime.
  __ And(scratch, index, Operand(Smi::FromInt(1)));
  __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg));
  __ dsra(index, index, 1);

  // Negative indices address out-of-object properties.
  __ Branch(&out_of_object, lt, index, Operand(zero_reg));
  // ... (scale the index into scratch)
  __ Daddu(scratch, object, scratch);
  __ ld(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
  __ Branch(&done);

  __ bind(&out_of_object);
  __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // The index is the negated out-of-object property index plus 1.
  __ Dsubu(scratch, result, scratch);
  __ ld(result, FieldMemOperand(scratch,
                                FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}

void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ sd(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
}

void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ li(at, scope_info);
  __ Push(at, ToRegister(instr->function()));
  CallRuntime(Runtime::kPushBlockContext, 2, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}