instruction-selector-arm64.cc
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"

namespace v8 {
namespace internal {
namespace compiler {

enum ImmediateMode {
  kArithmeticImm,  // 12 bit unsigned immediate shifted left 0 or 12 bits
  kShift32Imm,     // 0 - 31
  kShift64Imm,     // 0 - 63
  kLogical32Imm,
  kLogical64Imm,
  kLoadStoreImm8,  // signed 8 bit or 12 bit unsigned scaled by access size
  kLoadStoreImm16,
  kLoadStoreImm32,
  kLoadStoreImm64,
  kNoImmediate
};
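
// Each ImmediateMode names the A64 immediate encoding that a constant must
// satisfy before CanBeImmediate() below will let it be used directly as an
// instruction operand instead of being forced into a register.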


// Adds Arm64-specific methods for generating operands.
class Arm64OperandGenerator FINAL : public OperandGenerator {
 public:
  explicit Arm64OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  InstructionOperand* UseOperand(Node* node, ImmediateMode mode) {
    if (CanBeImmediate(node, mode)) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  bool CanBeImmediate(Node* node, ImmediateMode mode) {
    int64_t value;
    if (node->opcode() == IrOpcode::kInt32Constant)
      value = OpParameter<int32_t>(node);
    else if (node->opcode() == IrOpcode::kInt64Constant)
      value = OpParameter<int64_t>(node);
    else
      return false;
    unsigned ignored;
    switch (mode) {
      case kLogical32Imm:
        // TODO(dcarney): some unencodable values can be handled by
        // switching instructions.
        return Assembler::IsImmLogical(static_cast<uint64_t>(value), 32,
                                       &ignored, &ignored, &ignored);
      case kLogical64Imm:
        return Assembler::IsImmLogical(static_cast<uint64_t>(value), 64,
                                       &ignored, &ignored, &ignored);
      case kArithmeticImm:
        // TODO(dcarney): -values can be handled by instruction swapping
        return Assembler::IsImmAddSub(value);
      case kShift32Imm:
        return 0 <= value && value < 32;
      case kShift64Imm:
        return 0 <= value && value < 64;
      case kLoadStoreImm8:
        return IsLoadStoreImmediate(value, LSByte);
      case kLoadStoreImm16:
        return IsLoadStoreImmediate(value, LSHalfword);
      case kLoadStoreImm32:
        return IsLoadStoreImmediate(value, LSWord);
      case kLoadStoreImm64:
        return IsLoadStoreImmediate(value, LSDoubleWord);
      case kNoImmediate:
        return false;
    }
    return false;
  }

 private:
  bool IsLoadStoreImmediate(int64_t value, LSDataSize size) {
    return Assembler::IsImmLSScaled(value, size) ||
           Assembler::IsImmLSUnscaled(value);
  }
};


static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node) {
  Arm64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseRegister(node->InputAt(1)));
}


static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
                            Node* node) {
  Arm64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseRegister(node->InputAt(1)));
}


static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node, ImmediateMode operand_mode) {
  Arm64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseOperand(node->InputAt(1), operand_mode));
}


// Shared routine for multiple binary operations.
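// A branch continuation appends the true/false block labels as extra inputs;
// a set continuation defines an extra output register for the boolean result.
// The operand arrays below are sized for this worst case.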
template <typename Matcher>
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, ImmediateMode operand_mode,
                       FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  Matcher m(node);
  InstructionOperand* inputs[4];
  size_t input_count = 0;
  InstructionOperand* outputs[2];
  size_t output_count = 0;

  inputs[input_count++] = g.UseRegister(m.left().node());
  inputs[input_count++] = g.UseOperand(m.right().node(), operand_mode);

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineAsRegister(node);
  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0, input_count);
  DCHECK_NE(0, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
                                      outputs, input_count, inputs);
  if (cont->IsBranch()) instr->MarkAsControl();
}


// Shared routine for multiple binary operations.
template <typename Matcher>
static void VisitBinop(InstructionSelector* selector, Node* node,
                       ArchOpcode opcode, ImmediateMode operand_mode) {
  FlagsContinuation cont;
  VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
}


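// Loads and stores use the [base, #imm] addressing mode (kMode_MRI) when the
// index is a constant that fits the access-size-scaled immediate range, and
// fall back to the register-index form (kMode_MRR) otherwise.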
void InstructionSelector::VisitLoad(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
  Arm64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  ArchOpcode opcode;
  ImmediateMode immediate_mode = kNoImmediate;
  switch (rep) {
    case kRepFloat32:
      opcode = kArm64LdrS;
      immediate_mode = kLoadStoreImm32;
      break;
    case kRepFloat64:
      opcode = kArm64LdrD;
      immediate_mode = kLoadStoreImm64;
      break;
    case kRepBit:  // Fall through.
    case kRepWord8:
      opcode = typ == kTypeInt32 ? kArm64Ldrsb : kArm64Ldrb;
      immediate_mode = kLoadStoreImm8;
      break;
    case kRepWord16:
      opcode = typ == kTypeInt32 ? kArm64Ldrsh : kArm64Ldrh;
      immediate_mode = kLoadStoreImm16;
      break;
    case kRepWord32:
      opcode = kArm64LdrW;
      immediate_mode = kLoadStoreImm32;
      break;
    case kRepTagged:  // Fall through.
    case kRepWord64:
      opcode = kArm64Ldr;
      immediate_mode = kLoadStoreImm64;
      break;
    default:
      UNREACHABLE();
      return;
  }
  if (g.CanBeImmediate(index, immediate_mode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_MRR),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
  }
}


void InstructionSelector::VisitStore(Node* node) {
  Arm64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
  MachineType rep = RepresentationOf(store_rep.machine_type());
  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
    DCHECK(rep == kRepTagged);
    // TODO(dcarney): refactor RecordWrite function to take temp registers
    // and pass them here instead of using fixed regs
    // TODO(dcarney): handle immediate indices.
    InstructionOperand* temps[] = {g.TempRegister(x11), g.TempRegister(x12)};
    Emit(kArm64StoreWriteBarrier, NULL, g.UseFixed(base, x10),
         g.UseFixed(index, x11), g.UseFixed(value, x12), arraysize(temps),
         temps);
    return;
  }
  DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
  ArchOpcode opcode;
  ImmediateMode immediate_mode = kNoImmediate;
  switch (rep) {
    case kRepFloat32:
      opcode = kArm64StrS;
      immediate_mode = kLoadStoreImm32;
      break;
    case kRepFloat64:
      opcode = kArm64StrD;
      immediate_mode = kLoadStoreImm64;
      break;
    case kRepBit:  // Fall through.
    case kRepWord8:
      opcode = kArm64Strb;
      immediate_mode = kLoadStoreImm8;
      break;
    case kRepWord16:
      opcode = kArm64Strh;
      immediate_mode = kLoadStoreImm16;
      break;
    case kRepWord32:
      opcode = kArm64StrW;
      immediate_mode = kLoadStoreImm32;
      break;
    case kRepTagged:  // Fall through.
    case kRepWord64:
      opcode = kArm64Str;
      immediate_mode = kLoadStoreImm64;
      break;
    default:
      UNREACHABLE();
      return;
  }
  if (g.CanBeImmediate(index, immediate_mode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
         g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_MRR), NULL,
         g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
  }
}


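// Shared routine for the word logical binops. When one side of the operation
// is an Xor with -1 that this node covers, the inverted-operand opcode
// (Bic/Orn/Eon) is selected instead; a plain Xor with -1 becomes Not.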
template <typename Matcher>
static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
                         ArchOpcode opcode, bool left_can_cover,
                         bool right_can_cover, ImmediateMode imm_mode) {
  Arm64OperandGenerator g(selector);

  // Map instruction to equivalent operation with inverted right input.
  ArchOpcode inv_opcode = opcode;
  switch (opcode) {
    case kArm64And32:
      inv_opcode = kArm64Bic32;
      break;
    case kArm64And:
      inv_opcode = kArm64Bic;
      break;
    case kArm64Or32:
      inv_opcode = kArm64Orn32;
      break;
    case kArm64Or:
      inv_opcode = kArm64Orn;
      break;
    case kArm64Eor32:
      inv_opcode = kArm64Eon32;
      break;
    case kArm64Eor:
      inv_opcode = kArm64Eon;
      break;
    default:
      UNREACHABLE();
  }

  // Select Logical(y, ~x) for Logical(Xor(x, -1), y).
  if ((m->left().IsWord32Xor() || m->left().IsWord64Xor()) && left_can_cover) {
    Matcher mleft(m->left().node());
    if (mleft.right().Is(-1)) {
      // TODO(all): support shifted operand on right.
      selector->Emit(inv_opcode, g.DefineAsRegister(node),
                     g.UseRegister(m->right().node()),
                     g.UseRegister(mleft.left().node()));
      return;
    }
  }

  // Select Logical(x, ~y) for Logical(x, Xor(y, -1)).
  if ((m->right().IsWord32Xor() || m->right().IsWord64Xor()) &&
      right_can_cover) {
    Matcher mright(m->right().node());
    if (mright.right().Is(-1)) {
      // TODO(all): support shifted operand on right.
      selector->Emit(inv_opcode, g.DefineAsRegister(node),
                     g.UseRegister(m->left().node()),
                     g.UseRegister(mright.left().node()));
      return;
    }
  }

  if (m->IsWord32Xor() && m->right().Is(-1)) {
    selector->Emit(kArm64Not32, g.DefineAsRegister(node),
                   g.UseRegister(m->left().node()));
  } else if (m->IsWord64Xor() && m->right().Is(-1)) {
    selector->Emit(kArm64Not, g.DefineAsRegister(node),
                   g.UseRegister(m->left().node()));
  } else {
    VisitBinop<Matcher>(selector, node, opcode, imm_mode);
  }
}


void InstructionSelector::VisitWord32And(Node* node) {
  Int32BinopMatcher m(node);
  VisitLogical<Int32BinopMatcher>(
      this, node, &m, kArm64And32, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical32Imm);
}


void InstructionSelector::VisitWord64And(Node* node) {
  Int64BinopMatcher m(node);
  VisitLogical<Int64BinopMatcher>(
      this, node, &m, kArm64And, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical64Imm);
}


void InstructionSelector::VisitWord32Or(Node* node) {
  Int32BinopMatcher m(node);
  VisitLogical<Int32BinopMatcher>(
      this, node, &m, kArm64Or32, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical32Imm);
}


void InstructionSelector::VisitWord64Or(Node* node) {
  Int64BinopMatcher m(node);
  VisitLogical<Int64BinopMatcher>(
      this, node, &m, kArm64Or, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical64Imm);
}


void InstructionSelector::VisitWord32Xor(Node* node) {
  Int32BinopMatcher m(node);
  VisitLogical<Int32BinopMatcher>(
      this, node, &m, kArm64Eor32, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical32Imm);
}


void InstructionSelector::VisitWord64Xor(Node* node) {
  Int64BinopMatcher m(node);
  VisitLogical<Int64BinopMatcher>(
      this, node, &m, kArm64Eor, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical64Imm);
}


void InstructionSelector::VisitWord32Shl(Node* node) {
  VisitRRO(this, kArm64Shl32, node, kShift32Imm);
}


void InstructionSelector::VisitWord64Shl(Node* node) {
  VisitRRO(this, kArm64Shl, node, kShift64Imm);
}


void InstructionSelector::VisitWord32Shr(Node* node) {
  VisitRRO(this, kArm64Shr32, node, kShift32Imm);
}


void InstructionSelector::VisitWord64Shr(Node* node) {
  VisitRRO(this, kArm64Shr, node, kShift64Imm);
}


void InstructionSelector::VisitWord32Sar(Node* node) {
  VisitRRO(this, kArm64Sar32, node, kShift32Imm);
}


void InstructionSelector::VisitWord64Sar(Node* node) {
  VisitRRO(this, kArm64Sar, node, kShift64Imm);
}


void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitRRO(this, kArm64Ror32, node, kShift32Imm);
}


void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitRRO(this, kArm64Ror, node, kShift64Imm);
}


void InstructionSelector::VisitInt32Add(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  // Select Madd(x, y, z) for Add(Mul(x, y), z).
  if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    Emit(kArm64Madd32, g.DefineAsRegister(node),
         g.UseRegister(mleft.left().node()),
         g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
    return;
  }
  // Select Madd(x, y, z) for Add(z, Mul(x, y)).
  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    Emit(kArm64Madd32, g.DefineAsRegister(node),
         g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
    return;
  }
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm);
}


void InstructionSelector::VisitInt64Add(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  // Select Madd(x, y, z) for Add(Mul(x, y), z).
  if (m.left().IsInt64Mul() && CanCover(node, m.left().node())) {
    Int64BinopMatcher mleft(m.left().node());
    Emit(kArm64Madd, g.DefineAsRegister(node),
         g.UseRegister(mleft.left().node()),
         g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
    return;
  }
  // Select Madd(x, y, z) for Add(z, Mul(x, y)).
  if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());
    Emit(kArm64Madd, g.DefineAsRegister(node),
         g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
    return;
  }
  VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm);
}


void InstructionSelector::VisitInt32Sub(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);

  // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    Emit(kArm64Msub32, g.DefineAsRegister(node),
         g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
    return;
  }

  if (m.left().Is(0)) {
    Emit(kArm64Neg32, g.DefineAsRegister(node),
         g.UseRegister(m.right().node()));
  } else {
    VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm);
  }
}


void InstructionSelector::VisitInt64Sub(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);

  // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
  if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());
    Emit(kArm64Msub, g.DefineAsRegister(node),
         g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
    return;
  }

  if (m.left().Is(0)) {
    Emit(kArm64Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
  } else {
    VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm);
  }
}


void InstructionSelector::VisitInt32Mul(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);

  if (m.left().IsInt32Sub() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());

    // Select Mneg(x, y) for Mul(Sub(0, x), y).
    if (mleft.left().Is(0)) {
      Emit(kArm64Mneg32, g.DefineAsRegister(node),
           g.UseRegister(mleft.right().node()),
           g.UseRegister(m.right().node()));
      return;
    }
  }

  if (m.right().IsInt32Sub() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());

    // Select Mneg(x, y) for Mul(x, Sub(0, y)).
    if (mright.left().Is(0)) {
      Emit(kArm64Mneg32, g.DefineAsRegister(node),
           g.UseRegister(m.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }

  VisitRRR(this, kArm64Mul32, node);
}


void InstructionSelector::VisitInt64Mul(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);

  if (m.left().IsInt64Sub() && CanCover(node, m.left().node())) {
    Int64BinopMatcher mleft(m.left().node());

    // Select Mneg(x, y) for Mul(Sub(0, x), y).
    if (mleft.left().Is(0)) {
      Emit(kArm64Mneg, g.DefineAsRegister(node),
           g.UseRegister(mleft.right().node()),
           g.UseRegister(m.right().node()));
      return;
    }
  }

  if (m.right().IsInt64Sub() && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());

    // Select Mneg(x, y) for Mul(x, Sub(0, y)).
    if (mright.left().Is(0)) {
      Emit(kArm64Mneg, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }

  VisitRRR(this, kArm64Mul, node);
}


void InstructionSelector::VisitInt32Div(Node* node) {
  VisitRRR(this, kArm64Idiv32, node);
}


void InstructionSelector::VisitInt64Div(Node* node) {
  VisitRRR(this, kArm64Idiv, node);
}


void InstructionSelector::VisitInt32UDiv(Node* node) {
  VisitRRR(this, kArm64Udiv32, node);
}


void InstructionSelector::VisitInt64UDiv(Node* node) {
  VisitRRR(this, kArm64Udiv, node);
}


void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitRRR(this, kArm64Imod32, node);
}


void InstructionSelector::VisitInt64Mod(Node* node) {
  VisitRRR(this, kArm64Imod, node);
}


void InstructionSelector::VisitInt32UMod(Node* node) {
  VisitRRR(this, kArm64Umod32, node);
}


void InstructionSelector::VisitInt64UMod(Node* node) {
  VisitRRR(this, kArm64Umod, node);
}


void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float32ToFloat64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Int32ToFloat64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Uint32ToFloat64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64ToInt32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64ToUint32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Sxtw, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}


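// On A64 a 32-bit register write clears the upper 32 bits of the X register,
// so a plain 32-bit move implements both the Uint32->Uint64 zero-extension
// here and the Int64->Int32 truncation below.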
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64ToFloat32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitFloat64Add(Node* node) {
  VisitRRRFloat64(this, kArm64Float64Add, node);
}


void InstructionSelector::VisitFloat64Sub(Node* node) {
  VisitRRRFloat64(this, kArm64Float64Sub, node);
}


void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRRFloat64(this, kArm64Float64Mul, node);
}


void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRRFloat64(this, kArm64Float64Div, node);
}


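// Float64Mod has no A64 instruction; it is lowered to a call, so the inputs
// are pinned to the d0/d1 argument registers, the result is fixed to d0, and
// the emitted instruction is marked as a call.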
void InstructionSelector::VisitFloat64Mod(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64Mod, g.DefineAsFixed(node, d0),
       g.UseFixed(node->InputAt(0), d0),
       g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
}


void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64Sqrt, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm, cont);
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm, cont);
}


// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                         InstructionOperand* left, InstructionOperand* right,
                         FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
                   g.Label(cont->false_block()))->MarkAsControl();
  } else {
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  }
}


// Shared routine for multiple word compare operations.
static void VisitWordCompare(InstructionSelector* selector, Node* node,
                             InstructionCode opcode, FlagsContinuation* cont,
                             bool commutative) {
  Arm64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right, kArithmeticImm)) {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
                 cont);
  } else if (g.CanBeImmediate(left, kArithmeticImm)) {
    if (!commutative) cont->Commute();
    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
                 cont);
  } else {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
                 cont);
  }
}


void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
  switch (node->opcode()) {
    case IrOpcode::kInt32Add:
      return VisitWordCompare(this, node, kArm64Cmn32, cont, true);
    case IrOpcode::kInt32Sub:
      return VisitWordCompare(this, node, kArm64Cmp32, cont, false);
    case IrOpcode::kWord32And:
      return VisitWordCompare(this, node, kArm64Tst32, cont, true);
    default:
      break;
  }

  Arm64OperandGenerator g(this);
  VisitCompare(this, kArm64Tst32, g.UseRegister(node), g.UseRegister(node),
               cont);
}


void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
  switch (node->opcode()) {
    case IrOpcode::kWord64And:
      return VisitWordCompare(this, node, kArm64Tst, cont, true);
    default:
      break;
  }

  Arm64OperandGenerator g(this);
  VisitCompare(this, kArm64Tst, g.UseRegister(node), g.UseRegister(node), cont);
}


void InstructionSelector::VisitWord32Compare(Node* node,
                                             FlagsContinuation* cont) {
  VisitWordCompare(this, node, kArm64Cmp32, cont, false);
}


void InstructionSelector::VisitWord64Compare(Node* node,
                                             FlagsContinuation* cont) {
  VisitWordCompare(this, node, kArm64Cmp, cont, false);
}


void InstructionSelector::VisitFloat64Compare(Node* node,
                                              FlagsContinuation* cont) {
  Arm64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  VisitCompare(this, kArm64Float64Cmp, g.UseRegister(left),
               g.UseRegister(right), cont);
}


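// VisitCall pushes stack arguments by first claiming space for all of them
// and then poking the values into the claimed slots: an odd argument count
// gets a single poke, and the remaining arguments are stored as pairs.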
void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
                                    BasicBlock* deoptimization) {
  Arm64OperandGenerator g(this);
  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);

  FrameStateDescriptor* frame_state_descriptor = NULL;
  if (descriptor->NeedsFrameState()) {
    frame_state_descriptor =
        GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
  }

  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);

  // Compute InstructionOperands for inputs and outputs.
  // TODO(turbofan): on ARM64 it's probably better to use the code object in a
  // register if there are multiple uses of it. Improve constant pool and the
  // heuristics in the register allocator for where to emit constants.
  InitializeCallBuffer(call, &buffer, true, false);

  // Push the arguments to the stack.
  bool pushed_count_uneven = buffer.pushed_nodes.size() & 1;
  int aligned_push_count = buffer.pushed_nodes.size();
  // TODO(dcarney): claim and poke probably take small immediates,
  // loop here or whatever.
  // Bump the stack pointer(s).
  if (aligned_push_count > 0) {
    // TODO(dcarney): it would be better to bump the csp here only
    // and emit paired stores with increment for non c frames.
    Emit(kArm64Claim | MiscField::encode(aligned_push_count), NULL);
  }
  // Move arguments to the stack.
  {
    int slot = buffer.pushed_nodes.size() - 1;
    // Emit the uneven pushes.
    if (pushed_count_uneven) {
      Node* input = buffer.pushed_nodes[slot];
      Emit(kArm64Poke | MiscField::encode(slot), NULL, g.UseRegister(input));
      slot--;
    }
    // Now all pushes can be done in pairs.
    for (; slot >= 0; slot -= 2) {
      Emit(kArm64PokePair | MiscField::encode(slot), NULL,
           g.UseRegister(buffer.pushed_nodes[slot]),
           g.UseRegister(buffer.pushed_nodes[slot - 1]));
    }
  }

  // Select the appropriate opcode based on the call type.
  InstructionCode opcode;
  switch (descriptor->kind()) {
    case CallDescriptor::kCallCodeObject: {
      opcode = kArchCallCodeObject;
      break;
    }
    case CallDescriptor::kCallJSFunction:
      opcode = kArchCallJSFunction;
      break;
    default:
      UNREACHABLE();
      return;
  }
  opcode |= MiscField::encode(descriptor->flags());

  // Emit the call instruction.
  Instruction* call_instr =
      Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
           buffer.instruction_args.size(), &buffer.instruction_args.front());

  call_instr->MarkAsCall();
  if (deoptimization != NULL) {
    DCHECK(continuation != NULL);
    call_instr->MarkAsControl();
  }
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8