// Adds IA32-specific methods for generating operands.
class IA32OperandGenerator FINAL : public OperandGenerator {
 public:
  explicit IA32OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  InstructionOperand* UseByteRegister(Node* node) {
    return UseFixed(node, edx);
  }

  bool CanBeImmediate(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kInt32Constant:
      case IrOpcode::kNumberConstant:
      case IrOpcode::kExternalConstant:
        return true;
      case IrOpcode::kHeapConstant: {
        // Constants in new space cannot be used as immediates because the
        // GC does not scan code objects when collecting the new generation.
        Unique<HeapObject> value = OpParameter<Unique<HeapObject> >(node);
        return !isolate()->heap()->InNewSpace(*value.handle());
      }
      default:
        return false;
    }
  }

  bool CanBeBetterLeftOperand(Node* node) const {
    return !selector()->IsLive(node);
  }
};

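// Matches the (base, index) input pair of loads and stores against the IA32
// addressing modes [base], [base + disp], [base + index * scale] and
// [base + index * scale + disp], so that the address arithmetic can be
// folded into the memory operand of a single instruction.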
class AddressingModeMatcher {
 public:
  AddressingModeMatcher(IA32OperandGenerator* g, Node* base, Node* index)
      : base_operand_(NULL),
        index_operand_(NULL),
        displacement_operand_(NULL),
        mode_(kMode_None) {
    Int32Matcher index_imm(index);
    if (index_imm.HasValue()) {
      // The index is a constant: fold it, and an immediate base if any,
      // into the displacement.
      int32_t displacement = index_imm.Value();
      Int32Matcher base_imm(base);
      if (!base_imm.HasValue()) {
        base_operand_ = g->UseRegister(base);
      } else {
        displacement += base_imm.Value();
      }
      // ... (when base and index are both constants the address is
      // absolute) ...
      if (displacement == 0) {
        mode_ = kMode_MR;
      } else {
        displacement_operand_ = g->TempImmediate(displacement);
        mode_ = kMode_MRI;
      }
    } else {
      // The index is a (possibly scaled) register; match the scale and
      // displacement (elided), then fold an immediate base as above.
      int32_t displacement = 0;
      // ...
      Int32Matcher base_imm(base);
      if (!base_imm.HasValue()) {
        base_operand_ = g->UseRegister(base);
      } else {
        displacement += base_imm.Value();
      }
      if (displacement != 0) {
        displacement_operand_ = g->TempImmediate(displacement);
      }
      // ...
      // A scale factor of one degenerates to the plain register modes.
      if (mode_ == kMode_M1) {
        mode_ = kMode_MR;
      } else if (mode_ == kMode_M1I) {
        mode_ = kMode_MRI;
      }
    }
  }

  // The scaled modes follow their scale-1 mode consecutively in the
  // AddressingMode enum, so the scaled mode is |one| plus the power of two
  // of the scale factor.
  AddressingMode GetMode(AddressingMode one, int power) {
    return static_cast<AddressingMode>(static_cast<int>(one) + power);
  }

  // Fills |inputs| with the non-NULL base, index and displacement operands
  // and returns how many were set.
  size_t SetInputs(InstructionOperand** inputs) {
    size_t input_count = 0;
    // ...
    return input_count;
  }

  static const int kMaxInputCount = 3;
  InstructionOperand* base_operand_;
  InstructionOperand* index_operand_;
  InstructionOperand* displacement_operand_;
  AddressingMode mode_;
};

void InstructionSelector::VisitLoad(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
  MachineType typ = TypeOf(OpParameter<MachineType>(node));
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  // Select the opcode from the representation; sub-word loads also encode
  // whether the loaded value is sign- or zero-extended.
  ArchOpcode opcode;
  switch (rep) {
    // ...
    case kRepWord8:
      opcode = typ == kTypeInt32 ? kIA32Movsxbl : kIA32Movzxbl;
      break;
    case kRepWord16:
      opcode = typ == kTypeInt32 ? kIA32Movsxwl : kIA32Movzxwl;
      break;
    // ...
  }

  IA32OperandGenerator g(this);
  AddressingModeMatcher matcher(&g, base, index);
  InstructionCode code = opcode | AddressingModeField::encode(matcher.mode_);
  InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
  InstructionOperand* inputs[AddressingModeMatcher::kMaxInputCount];
  size_t input_count = matcher.SetInputs(inputs);
  Emit(code, 1, outputs, input_count, inputs);
}

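// Stores of tagged values may need a write barrier so that the GC sees
// pointers written into old-space objects; all other stores are plain moves.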
void InstructionSelector::VisitStore(Node* node) {
  IA32OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
  MachineType rep = RepresentationOf(store_rep.machine_type());
  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
    DCHECK_EQ(kRepTagged, rep);
    // The write-barrier stub expects its arguments in fixed registers.
    InstructionOperand* temps[] = {g.TempRegister(ecx), g.TempRegister(edx)};
    Emit(kIA32StoreWriteBarrier, NULL, g.UseFixed(base, ebx),
         g.UseFixed(index, ecx), g.UseFixed(value, edx), arraysize(temps),
         temps);
    return;
  }

  ArchOpcode opcode;
  // ... select kIA32Movsd/Movb/Movw/Movl from rep, as in VisitLoad ...

  InstructionOperand* val;
  if (g.CanBeImmediate(value)) {
    val = g.UseImmediate(value);
  } else if (rep == kRepWord8 || rep == kRepBit) {
    // Byte stores can only address the low byte registers.
    val = g.UseByteRegister(value);
  } else {
    val = g.UseRegister(value);
  }

  AddressingModeMatcher matcher(&g, base, index);
  InstructionCode code = opcode | AddressingModeField::encode(matcher.mode_);
  InstructionOperand* inputs[AddressingModeMatcher::kMaxInputCount + 1];
  size_t input_count = matcher.SetInputs(inputs);
  inputs[input_count++] = val;
  Emit(code, 0, static_cast<InstructionOperand**>(NULL), input_count, inputs);
}

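// Shared routine for multiple binary operations. The right operand may be an
// immediate; otherwise the left operand is forced into a register and, for
// commutative operators, the operands are swapped when the right one is no
// longer live and is therefore cheaper to clobber.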
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  IA32OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  InstructionOperand* inputs[4];
  size_t input_count = 0;
  InstructionOperand* outputs[2];
  size_t output_count = 0;

  if (g.CanBeImmediate(right)) {
    inputs[input_count++] = g.Use(left);
    inputs[input_count++] = g.UseImmediate(right);
  } else {
    if (node->op()->HasProperty(Operator::kCommutative) &&
        g.CanBeBetterLeftOperand(right)) {
      std::swap(left, right);
    }
    inputs[input_count++] = g.UseRegister(left);
    inputs[input_count++] = g.Use(right);
  }

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineSameAsFirst(node);
  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0, input_count);
  DCHECK_GE(arraysize(inputs), input_count);

  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
                                      outputs, input_count, inputs);
  if (cont->IsBranch()) instr->MarkAsControl();
}

static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, &cont);
}

void InstructionSelector::VisitWord32And(Node* node) {
  VisitBinop(this, node, kIA32And);
}


void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kIA32Or);
}


void InstructionSelector::VisitWord32Xor(Node* node) {
  IA32OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().Is(-1)) {
    // "x ^ -1" is just "not x".
    Emit(kIA32Not, g.DefineSameAsFirst(node), g.Use(m.left().node()));
  } else {
    VisitBinop(this, node, kIA32Xor);
  }
}

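// Shared routine for multiple shift operations. A variable shift count must
// be placed in cl (the low byte of ecx); the CPU masks the count to five
// bits, so an explicit "count & 0x1F" on the count is redundant.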
static inline void VisitShift(InstructionSelector* selector, Node* node,
                              ArchOpcode opcode) {
  IA32OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseImmediate(right));
  } else {
    Int32BinopMatcher m(node);
    if (m.right().IsWord32And()) {
      // Strip a redundant mask of the shift count.
      Int32BinopMatcher mright(m.right().node());
      if (mright.right().Is(0x1F)) {
        right = mright.left().node();
      }
    }
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseFixed(right, ecx));
  }
}

void InstructionSelector::VisitWord32Shl(Node* node) {
  VisitShift(this, node, kIA32Shl);
}


void InstructionSelector::VisitWord32Shr(Node* node) {
  VisitShift(this, node, kIA32Shr);
}


void InstructionSelector::VisitWord32Sar(Node* node) {
  VisitShift(this, node, kIA32Sar);
}


void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitShift(this, node, kIA32Ror);
}

void InstructionSelector::VisitInt32Add(Node* node) {
  VisitBinop(this, node, kIA32Add);
}


void InstructionSelector::VisitInt32Sub(Node* node) {
  IA32OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().Is(0)) {
    // "0 - x" is just "neg x".
    Emit(kIA32Neg, g.DefineSameAsFirst(node), g.Use(m.right().node()));
  } else {
    VisitBinop(this, node, kIA32Sub);
  }
}

void InstructionSelector::VisitInt32Mul(Node* node) {
  IA32OperandGenerator g(this);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  if (g.CanBeImmediate(right)) {
    // The three-operand imul form can write to a fresh register.
    Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(left),
         g.UseImmediate(right));
  } else {
    if (g.CanBeBetterLeftOperand(right)) {
      std::swap(left, right);
    }
    Emit(kIA32Imul, g.DefineSameAsFirst(node), g.UseRegister(left),
         g.Use(right));
  }
}

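// Shared routine for the division operations. On IA32, idiv/div take the
// dividend in edx:eax and produce the quotient in eax and the remainder in
// edx, hence the fixed-register constraints and the temps marking the other
// half of the pair as clobbered. The divisor gets its own register via
// UseUnique so it cannot end up in one of the clobbered registers.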
static inline void VisitDiv(InstructionSelector* selector, Node* node,
                            ArchOpcode opcode) {
  IA32OperandGenerator g(selector);
  InstructionOperand* temps[] = {g.TempRegister(edx)};
  size_t temp_count = arraysize(temps);
  selector->Emit(opcode, g.DefineAsFixed(node, eax),
                 g.UseFixed(node->InputAt(0), eax),
                 g.UseUnique(node->InputAt(1)), temp_count, temps);
}

void InstructionSelector::VisitInt32Div(Node* node) {
  VisitDiv(this, node, kIA32Idiv);
}


void InstructionSelector::VisitInt32UDiv(Node* node) {
  VisitDiv(this, node, kIA32Udiv);
}

static inline void VisitMod(InstructionSelector* selector, Node* node,
                            ArchOpcode opcode) {
  IA32OperandGenerator g(selector);
  InstructionOperand* temps[] = {g.TempRegister(eax)};
  size_t temp_count = arraysize(temps);
  selector->Emit(opcode, g.DefineAsFixed(node, edx),
                 g.UseFixed(node->InputAt(0), eax),
                 g.UseUnique(node->InputAt(1)), temp_count, temps);
}

void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitMod(this, node, kIA32Idiv);
}


void InstructionSelector::VisitInt32UMod(Node* node) {
  VisitMod(this, node, kIA32Udiv);
}

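// The representation changes map one-to-one onto SSE instructions. Their
// results can live in any register, so DefineAsRegister is used instead of
// the DefineSameAsFirst constraint of the two-operand arithmetic above.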
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  IA32OperandGenerator g(this);
  Emit(kSSECvtss2sd, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  IA32OperandGenerator g(this);
  Emit(kSSEInt32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}

void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  IA32OperandGenerator g(this);
  Emit(kSSEUint32ToFloat64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  IA32OperandGenerator g(this);
  Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}

void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  IA32OperandGenerator g(this);
  Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}

void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  IA32OperandGenerator g(this);
  Emit(kSSECvtsd2ss, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitFloat64Add(Node* node) {
  IA32OperandGenerator g(this);
  Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitFloat64Sub(Node* node) {
  IA32OperandGenerator g(this);
  Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitFloat64Mul(Node* node) {
  IA32OperandGenerator g(this);
  Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitFloat64Div(Node* node) {
  IA32OperandGenerator g(this);
  Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitFloat64Mod(Node* node) {
  IA32OperandGenerator g(this);
  InstructionOperand* temps[] = {g.TempRegister(eax)};
  Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
       temps);
}

void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  IA32OperandGenerator g(this);
  Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}

void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  VisitBinop(this, node, kIA32Add, cont);
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  VisitBinop(this, node, kIA32Sub, cont);
}

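// Shared routine for multiple compare operations. The FlagsContinuation
// decides how the resulting flags are consumed: either the comparison
// branches to the continuation's true/false blocks, or the condition is
// materialized as a boolean in a register.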
static inline void VisitCompare(InstructionSelector* selector,
                                InstructionCode opcode,
                                InstructionOperand* left,
                                InstructionOperand* right,
                                FlagsContinuation* cont) {
  IA32OperandGenerator g(selector);
  if (cont->IsBranch()) {
    selector->Emit(cont->Encode(opcode), NULL, left, right,
                   g.Label(cont->true_block()),
                   g.Label(cont->false_block()))->MarkAsControl();
  } else {
    DCHECK(cont->IsSet());
    selector->Emit(cont->Encode(opcode), g.DefineAsRegister(cont->result()),
                   left, right);
  }
}

static inline void VisitWordCompare(InstructionSelector* selector, Node* node,
                                    InstructionCode opcode,
                                    FlagsContinuation* cont, bool commutative) {
  IA32OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on the left or right side of the comparison.
  if (g.CanBeImmediate(right)) {
    VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
  } else if (g.CanBeImmediate(left)) {
    // Moving the immediate to the right flips the condition unless the
    // operation is commutative.
    if (!commutative) cont->Commute();
    VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
  } else {
    VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
  }
}

void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
  switch (node->opcode()) {
    case IrOpcode::kInt32Sub:
      return VisitWordCompare(this, node, kIA32Cmp, cont, false);
    case IrOpcode::kWord32And:
      return VisitWordCompare(this, node, kIA32Test, cont, true);
    default:
      break;
  }

  IA32OperandGenerator g(this);
  VisitCompare(this, kIA32Test, g.Use(node), g.TempImmediate(-1), cont);
}

void InstructionSelector::VisitWord32Compare(Node* node,
                                             FlagsContinuation* cont) {
  VisitWordCompare(this, node, kIA32Cmp, cont, false);
}

void InstructionSelector::VisitFloat64Compare(Node* node,
                                              FlagsContinuation* cont) {
  IA32OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  VisitCompare(this, kSSEFloat64Cmp, g.UseRegister(left), g.Use(right), cont);
}

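// Calls are lowered in three steps: the call buffer collects the frame state
// and argument operands, stack arguments are pushed explicitly, and the call
// itself is emitted as either a code-object or a JS-function call.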
void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
                                    BasicBlock* deoptimization) {
  IA32OperandGenerator g(this);
  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);

  FrameStateDescriptor* frame_state_descriptor = NULL;
  if (descriptor->NeedsFrameState()) {
    frame_state_descriptor =
        GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
  }

  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);

  // Compute InstructionOperands for inputs and outputs.
  InitializeCallBuffer(call, &buffer, true, true);

  // Push any stack arguments.
  for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
       input != buffer.pushed_nodes.rend(); input++) {
    Emit(kIA32Push, NULL,
         g.CanBeImmediate(*input) ? g.UseImmediate(*input) : g.Use(*input));
  }

  // Select the appropriate opcode based on the call type.
  InstructionCode opcode;
  switch (descriptor->kind()) {
    case CallDescriptor::kCallCodeObject: {
      opcode = kArchCallCodeObject;
      break;
    }
    case CallDescriptor::kCallJSFunction:
      opcode = kArchCallJSFunction;
      break;
    default:
      UNREACHABLE();
      return;
  }

  // Emit the call instruction.
  Instruction* call_instr =
      Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
           buffer.instruction_args.size(), &buffer.instruction_args.front());

  call_instr->MarkAsCall();
  if (deoptimization != NULL) {
    DCHECK(continuation != NULL);
    call_instr->MarkAsControl();
  }
}