instruction-selector-arm.cc
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/bits.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"

namespace v8 {
namespace internal {
namespace compiler {

// Adds Arm-specific methods for generating InstructionOperands.
class ArmOperandGenerator FINAL : public OperandGenerator {
 public:
  explicit ArmOperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

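  // Uses |node| as an immediate operand when CanBeImmediate() accepts it for
  // the given opcode, and as a register operand otherwise.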
  InstructionOperand* UseOperand(Node* node, InstructionCode opcode) {
    if (CanBeImmediate(node, opcode)) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

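  // Returns true if |node| is a 32-bit constant that can be encoded directly
  // in the immediate field of the given instruction: an addressing-mode-1
  // operand for data-processing instructions, or an in-range offset for loads
  // and stores.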
  bool CanBeImmediate(Node* node, InstructionCode opcode) {
    Int32Matcher m(node);
    if (!m.HasValue()) return false;
    int32_t value = m.Value();
    switch (ArchOpcodeField::decode(opcode)) {
      case kArmAnd:
      case kArmMov:
      case kArmMvn:
      case kArmBic:
        return ImmediateFitsAddrMode1Instruction(value) ||
               ImmediateFitsAddrMode1Instruction(~value);

      case kArmAdd:
      case kArmSub:
      case kArmCmp:
      case kArmCmn:
        return ImmediateFitsAddrMode1Instruction(value) ||
               ImmediateFitsAddrMode1Instruction(-value);

      case kArmTst:
      case kArmTeq:
      case kArmOrr:
      case kArmEor:
      case kArmRsb:
        return ImmediateFitsAddrMode1Instruction(value);

      case kArmVldrF32:
      case kArmVstrF32:
      case kArmVldrF64:
      case kArmVstrF64:
        return value >= -1020 && value <= 1020 && (value % 4) == 0;

      case kArmLdrb:
      case kArmLdrsb:
      case kArmStrb:
      case kArmLdr:
      case kArmStr:
      case kArmStoreWriteBarrier:
        return value >= -4095 && value <= 4095;

      case kArmLdrh:
      case kArmLdrsh:
      case kArmStrh:
        return value >= -255 && value <= 255;

      case kArchCallCodeObject:
      case kArchCallJSFunction:
      case kArchJmp:
      case kArchNop:
      case kArchRet:
      case kArchTruncateDoubleToI:
      case kArmMul:
      case kArmMla:
      case kArmMls:
      case kArmSdiv:
      case kArmUdiv:
      case kArmBfc:
      case kArmUbfx:
      case kArmVcmpF64:
      case kArmVaddF64:
      case kArmVsubF64:
      case kArmVmulF64:
      case kArmVmlaF64:
      case kArmVmlsF64:
      case kArmVdivF64:
      case kArmVmodF64:
      case kArmVnegF64:
      case kArmVsqrtF64:
      case kArmVcvtF32F64:
      case kArmVcvtF64F32:
      case kArmVcvtF64S32:
      case kArmVcvtF64U32:
      case kArmVcvtS32F64:
      case kArmVcvtU32F64:
      case kArmPush:
        return false;
    }
    UNREACHABLE();
    return false;
  }

 private:
  static bool ImmediateFitsAddrMode1Instruction(int32_t imm32) {
    return Assembler::ImmediateFitsAddrMode1Instruction(imm32);
  }
};

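// Shared routine for float64 operations of the form Dd = Dn op Dm, with all
// three operands in registers.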
static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
                            Node* node) {
  ArmOperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseRegister(node->InputAt(1)));
}

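// Each TryMatch{ROR,ASR,LSL,LSR} helper below tries to fold a shift node into
// the flexible second operand (Operand2) of an ARM data-processing
// instruction, using an immediate shift count when it is in range and a
// register-specified shift otherwise.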
static bool TryMatchROR(InstructionSelector* selector,
                        InstructionCode* opcode_return, Node* node,
                        InstructionOperand** value_return,
                        InstructionOperand** shift_return) {
  ArmOperandGenerator g(selector);
  if (node->opcode() != IrOpcode::kWord32Ror) return false;
  Int32BinopMatcher m(node);
  *value_return = g.UseRegister(m.left().node());
  if (m.right().IsInRange(1, 31)) {
    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ROR_I);
    *shift_return = g.UseImmediate(m.right().node());
  } else {
    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ROR_R);
    *shift_return = g.UseRegister(m.right().node());
  }
  return true;
}


static inline bool TryMatchASR(InstructionSelector* selector,
                               InstructionCode* opcode_return, Node* node,
                               InstructionOperand** value_return,
                               InstructionOperand** shift_return) {
  ArmOperandGenerator g(selector);
  if (node->opcode() != IrOpcode::kWord32Sar) return false;
  Int32BinopMatcher m(node);
  *value_return = g.UseRegister(m.left().node());
  if (m.right().IsInRange(1, 32)) {
    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
    *shift_return = g.UseImmediate(m.right().node());
  } else {
    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ASR_R);
    *shift_return = g.UseRegister(m.right().node());
  }
  return true;
}


static inline bool TryMatchLSL(InstructionSelector* selector,
                               InstructionCode* opcode_return, Node* node,
                               InstructionOperand** value_return,
                               InstructionOperand** shift_return) {
  ArmOperandGenerator g(selector);
  if (node->opcode() != IrOpcode::kWord32Shl) return false;
  Int32BinopMatcher m(node);
  *value_return = g.UseRegister(m.left().node());
  if (m.right().IsInRange(0, 31)) {
    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
    *shift_return = g.UseImmediate(m.right().node());
  } else {
    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSL_R);
    *shift_return = g.UseRegister(m.right().node());
  }
  return true;
}


static inline bool TryMatchLSR(InstructionSelector* selector,
                               InstructionCode* opcode_return, Node* node,
                               InstructionOperand** value_return,
                               InstructionOperand** shift_return) {
  ArmOperandGenerator g(selector);
  if (node->opcode() != IrOpcode::kWord32Shr) return false;
  Int32BinopMatcher m(node);
  *value_return = g.UseRegister(m.left().node());
  if (m.right().IsInRange(1, 32)) {
    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSR_I);
    *shift_return = g.UseImmediate(m.right().node());
  } else {
    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSR_R);
    *shift_return = g.UseRegister(m.right().node());
  }
  return true;
}


static inline bool TryMatchShift(InstructionSelector* selector,
                                 InstructionCode* opcode_return, Node* node,
                                 InstructionOperand** value_return,
                                 InstructionOperand** shift_return) {
  return (
      TryMatchASR(selector, opcode_return, node, value_return, shift_return) ||
      TryMatchLSL(selector, opcode_return, node, value_return, shift_return) ||
      TryMatchLSR(selector, opcode_return, node, value_return, shift_return) ||
      TryMatchROR(selector, opcode_return, node, value_return, shift_return));
}

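// Tries to encode |node| as the second operand, either as an immediate or as
// a shifted register, writing the matched operands into |inputs| and their
// number into |input_count_return|.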
static inline bool TryMatchImmediateOrShift(InstructionSelector* selector,
                                            InstructionCode* opcode_return,
                                            Node* node,
                                            size_t* input_count_return,
                                            InstructionOperand** inputs) {
  ArmOperandGenerator g(selector);
  if (g.CanBeImmediate(node, *opcode_return)) {
    *opcode_return |= AddressingModeField::encode(kMode_Operand2_I);
    inputs[0] = g.UseImmediate(node);
    *input_count_return = 1;
    return true;
  }
  if (TryMatchShift(selector, opcode_return, node, &inputs[0], &inputs[1])) {
    *input_count_return = 2;
    return true;
  }
  return false;
}

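// Shared routine for binary operations with a flexible second operand. The
// right operand is preferred for the Operand2 slot; if only the left operand
// matches, the operands are swapped and |reverse_opcode| (e.g. RSB for SUB)
// is used instead.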
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, InstructionCode reverse_opcode,
                       FlagsContinuation* cont) {
  ArmOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand* inputs[5];
  size_t input_count = 0;
  InstructionOperand* outputs[2];
  size_t output_count = 0;

  if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
                               &input_count, &inputs[1])) {
    inputs[0] = g.UseRegister(m.left().node());
    input_count++;
  } else if (TryMatchImmediateOrShift(selector, &reverse_opcode,
                                      m.left().node(), &input_count,
                                      &inputs[1])) {
    inputs[0] = g.UseRegister(m.right().node());
    opcode = reverse_opcode;
    input_count++;
  } else {
    opcode |= AddressingModeField::encode(kMode_Operand2_R);
    inputs[input_count++] = g.UseRegister(m.left().node());
    inputs[input_count++] = g.UseRegister(m.right().node());
  }

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineAsRegister(node);
  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0, input_count);
  DCHECK_NE(0, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);
  DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));

  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
                                      outputs, input_count, inputs);
  if (cont->IsBranch()) instr->MarkAsControl();
}


static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, InstructionCode reverse_opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, reverse_opcode, &cont);
}

void InstructionSelector::VisitLoad(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
  ArmOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  ArchOpcode opcode;
  switch (rep) {
    case kRepFloat32:
      opcode = kArmVldrF32;
      break;
    case kRepFloat64:
      opcode = kArmVldrF64;
      break;
    case kRepBit:  // Fall through.
    case kRepWord8:
      opcode = typ == kTypeUint32 ? kArmLdrb : kArmLdrsb;
      break;
    case kRepWord16:
      opcode = typ == kTypeUint32 ? kArmLdrh : kArmLdrsh;
      break;
    case kRepTagged:  // Fall through.
    case kRepWord32:
      opcode = kArmLdr;
      break;
    default:
      UNREACHABLE();
      return;
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_Offset_RI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
  }
}


void InstructionSelector::VisitStore(Node* node) {
  ArmOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
  MachineType rep = RepresentationOf(store_rep.machine_type());
  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
    DCHECK(rep == kRepTagged);
    // TODO(dcarney): refactor RecordWrite function to take temp registers
    //                and pass them here instead of using fixed regs
    // TODO(dcarney): handle immediate indices.
    InstructionOperand* temps[] = {g.TempRegister(r5), g.TempRegister(r6)};
    Emit(kArmStoreWriteBarrier, NULL, g.UseFixed(base, r4),
         g.UseFixed(index, r5), g.UseFixed(value, r6), arraysize(temps),
         temps);
    return;
  }
  DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());

  ArchOpcode opcode;
  switch (rep) {
    case kRepFloat32:
      opcode = kArmVstrF32;
      break;
    case kRepFloat64:
      opcode = kArmVstrF64;
      break;
    case kRepBit:  // Fall through.
    case kRepWord8:
      opcode = kArmStrb;
      break;
    case kRepWord16:
      opcode = kArmStrh;
      break;
    case kRepTagged:  // Fall through.
    case kRepWord32:
      opcode = kArmStr;
      break;
    default:
      UNREACHABLE();
      return;
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), NULL,
         g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), NULL,
         g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
  }
}

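// Emits a BIC instruction to compute left & ~right, folding a shift of the
// right operand into Operand2 when possible.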
static inline void EmitBic(InstructionSelector* selector, Node* node,
                           Node* left, Node* right) {
  ArmOperandGenerator g(selector);
  InstructionCode opcode = kArmBic;
  InstructionOperand* value_operand;
  InstructionOperand* shift_operand;
  if (TryMatchShift(selector, &opcode, right, &value_operand, &shift_operand)) {
    selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
                   value_operand, shift_operand);
    return;
  }
  selector->Emit(opcode | AddressingModeField::encode(kMode_Operand2_R),
                 g.DefineAsRegister(node), g.UseRegister(left),
                 g.UseRegister(right));
}


void InstructionSelector::VisitWord32And(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Xor() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().Is(-1)) {
      EmitBic(this, node, m.right().node(), mleft.left().node());
      return;
    }
  }
  if (m.right().IsWord32Xor() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    if (mright.right().Is(-1)) {
      EmitBic(this, node, m.left().node(), mright.left().node());
      return;
    }
  }
  if (IsSupported(ARMv7) && m.right().HasValue()) {
    uint32_t value = m.right().Value();
    uint32_t width = base::bits::CountPopulation32(value);
    uint32_t msb = base::bits::CountLeadingZeros32(value);
    if (width != 0 && msb + width == 32) {
      DCHECK_EQ(0, base::bits::CountTrailingZeros32(value));
      if (m.left().IsWord32Shr()) {
        Int32BinopMatcher mleft(m.left().node());
        if (mleft.right().IsInRange(0, 31)) {
          Emit(kArmUbfx, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(mleft.right().node()), g.TempImmediate(width));
          return;
        }
      }
      Emit(kArmUbfx, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(0), g.TempImmediate(width));
      return;
    }
    // Try to interpret this AND as BFC.
    width = 32 - width;
    msb = base::bits::CountLeadingZeros32(~value);
    uint32_t lsb = base::bits::CountTrailingZeros32(~value);
    if (msb + width + lsb == 32) {
      Emit(kArmBfc, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
           g.TempImmediate(lsb), g.TempImmediate(width));
      return;
    }
  }
  VisitBinop(this, node, kArmAnd, kArmAnd);
}

void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kArmOrr, kArmOrr);
}


void InstructionSelector::VisitWord32Xor(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().Is(-1)) {
    InstructionCode opcode = kArmMvn;
    InstructionOperand* value_operand;
    InstructionOperand* shift_operand;
    if (TryMatchShift(this, &opcode, m.left().node(), &value_operand,
                      &shift_operand)) {
      Emit(opcode, g.DefineAsRegister(node), value_operand, shift_operand);
      return;
    }
    Emit(opcode | AddressingModeField::encode(kMode_Operand2_R),
         g.DefineAsRegister(node), g.UseRegister(m.left().node()));
    return;
  }
  VisitBinop(this, node, kArmEor, kArmEor);
}

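// Shared routine for shift operations; |try_match_shift| picks the concrete
// ASR/LSL/LSR/ROR encoding for the node.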
template <typename TryMatchShift>
static inline void VisitShift(InstructionSelector* selector, Node* node,
                              TryMatchShift try_match_shift,
                              FlagsContinuation* cont) {
  ArmOperandGenerator g(selector);
  InstructionCode opcode = kArmMov;
  InstructionOperand* inputs[4];
  size_t input_count = 2;
  InstructionOperand* outputs[2];
  size_t output_count = 0;

  CHECK(try_match_shift(selector, &opcode, node, &inputs[0], &inputs[1]));

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineAsRegister(node);
  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0, input_count);
  DCHECK_NE(0, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);
  DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));

  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
                                      outputs, input_count, inputs);
  if (cont->IsBranch()) instr->MarkAsControl();
}


template <typename TryMatchShift>
static inline void VisitShift(InstructionSelector* selector, Node* node,
                              TryMatchShift try_match_shift) {
  FlagsContinuation cont;
  VisitShift(selector, node, try_match_shift, &cont);
}


void InstructionSelector::VisitWord32Shl(Node* node) {
  VisitShift(this, node, TryMatchLSL);
}

void InstructionSelector::VisitWord32Shr(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (IsSupported(ARMv7) && m.left().IsWord32And() &&
      m.right().IsInRange(0, 31)) {
    int32_t lsb = m.right().Value();
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      uint32_t value = (mleft.right().Value() >> lsb) << lsb;
      uint32_t width = base::bits::CountPopulation32(value);
      uint32_t msb = base::bits::CountLeadingZeros32(value);
      if (msb + width + lsb == 32) {
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(value));
        Emit(kArmUbfx, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(width));
        return;
      }
    }
  }
  VisitShift(this, node, TryMatchLSR);
}

void InstructionSelector::VisitWord32Sar(Node* node) {
  VisitShift(this, node, TryMatchASR);
}


void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitShift(this, node, TryMatchROR);
}

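// An add (or subtract) of a multiply is selected as a single MLA (or MLS)
// multiply-accumulate instruction when the multiply cannot be observed
// elsewhere.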
void InstructionSelector::VisitInt32Add(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mleft.left().node()),
         g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
    return;
  }
  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
    return;
  }
  VisitBinop(this, node, kArmAdd, kArmAdd);
}


void InstructionSelector::VisitInt32Sub(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (IsSupported(MLS) && m.right().IsInt32Mul() &&
      CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    Emit(kArmMls, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
    return;
  }
  VisitBinop(this, node, kArmSub, kArmRsb);
}

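// Multiplications by 2^k + 1 and 2^k - 1 are strength-reduced to an ADD or
// RSB with a shifted operand, e.g. x * 9 becomes add rd, x, x, lsl #3.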
void InstructionSelector::VisitInt32Mul(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().HasValue() && m.right().Value() > 0) {
    int32_t value = m.right().Value();
    if (base::bits::IsPowerOfTwo32(value - 1)) {
      Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value - 1)));
      return;
    }
    if (value < kMaxInt && base::bits::IsPowerOfTwo32(value + 1)) {
      Emit(kArmRsb | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value + 1)));
      return;
    }
  }
  Emit(kArmMul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}

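// Emits code for an integer division. With hardware division support (SUDIV)
// this is a single SDIV/UDIV; otherwise both operands are converted to
// float64, divided with VDIV, and the quotient is converted back.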
static void EmitDiv(InstructionSelector* selector, ArchOpcode div_opcode,
                    ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode,
                    InstructionOperand* result_operand,
                    InstructionOperand* left_operand,
                    InstructionOperand* right_operand) {
  ArmOperandGenerator g(selector);
  if (selector->IsSupported(SUDIV)) {
    selector->Emit(div_opcode, result_operand, left_operand, right_operand);
    return;
  }
  InstructionOperand* left_double_operand = g.TempDoubleRegister();
  InstructionOperand* right_double_operand = g.TempDoubleRegister();
  InstructionOperand* result_double_operand = g.TempDoubleRegister();
  selector->Emit(f64i32_opcode, left_double_operand, left_operand);
  selector->Emit(f64i32_opcode, right_double_operand, right_operand);
  selector->Emit(kArmVdivF64, result_double_operand, left_double_operand,
                 right_double_operand);
  selector->Emit(i32f64_opcode, result_operand, result_double_operand);
}


static void VisitDiv(InstructionSelector* selector, Node* node,
                     ArchOpcode div_opcode, ArchOpcode f64i32_opcode,
                     ArchOpcode i32f64_opcode) {
  ArmOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode,
          g.DefineAsRegister(node), g.UseRegister(m.left().node()),
          g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitInt32Div(Node* node) {
  VisitDiv(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
}


void InstructionSelector::VisitInt32UDiv(Node* node) {
  VisitDiv(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
}

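// Computes the remainder as left - (left / right) * right, using a single MLS
// when available and a MUL followed by a SUB otherwise.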
static void VisitMod(InstructionSelector* selector, Node* node,
                     ArchOpcode div_opcode, ArchOpcode f64i32_opcode,
                     ArchOpcode i32f64_opcode) {
  ArmOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand* div_operand = g.TempRegister();
  InstructionOperand* result_operand = g.DefineAsRegister(node);
  InstructionOperand* left_operand = g.UseRegister(m.left().node());
  InstructionOperand* right_operand = g.UseRegister(m.right().node());
  EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode, div_operand,
          left_operand, right_operand);
  if (selector->IsSupported(MLS)) {
    selector->Emit(kArmMls, result_operand, div_operand, right_operand,
                   left_operand);
    return;
  }
  InstructionOperand* mul_operand = g.TempRegister();
  selector->Emit(kArmMul, mul_operand, div_operand, right_operand);
  selector->Emit(kArmSub, result_operand, left_operand, mul_operand);
}


void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitMod(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
}


void InstructionSelector::VisitInt32UMod(Node* node) {
  VisitMod(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
}


void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  ArmOperandGenerator g(this);
  Emit(kArmVcvtF64F32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  ArmOperandGenerator g(this);
  Emit(kArmVcvtF64S32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  ArmOperandGenerator g(this);
  Emit(kArmVcvtF64U32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  ArmOperandGenerator g(this);
  Emit(kArmVcvtS32F64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  ArmOperandGenerator g(this);
  Emit(kArmVcvtU32F64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  ArmOperandGenerator g(this);
  Emit(kArmVcvtF32F64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

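// A float64 add (or subtract) of a multiply is fused into a VMLA (or VMLS)
// multiply-accumulate instruction when the multiply cannot be observed
// elsewhere.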
void InstructionSelector::VisitFloat64Add(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    Emit(kArmVmlaF64, g.DefineSameAsFirst(node),
         g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
         g.UseRegister(mleft.right().node()));
    return;
  }
  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    Emit(kArmVmlaF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
         g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()));
    return;
  }
  VisitRRRFloat64(this, kArmVaddF64, node);
}


void InstructionSelector::VisitFloat64Sub(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
         g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()));
    return;
  }
  VisitRRRFloat64(this, kArmVsubF64, node);
}


void InstructionSelector::VisitFloat64Mul(Node* node) {
  ArmOperandGenerator g(this);
  Float64BinopMatcher m(node);
  if (m.right().Is(-1.0)) {
    Emit(kArmVnegF64, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
  } else {
    VisitRRRFloat64(this, kArmVmulF64, node);
  }
}


void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRRFloat64(this, kArmVdivF64, node);
}


void InstructionSelector::VisitFloat64Mod(Node* node) {
  ArmOperandGenerator g(this);
  Emit(kArmVmodF64, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
       g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
}


void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  ArmOperandGenerator g(this);
  Emit(kArmVsqrtF64, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
                                    BasicBlock* deoptimization) {
  ArmOperandGenerator g(this);
  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);

  FrameStateDescriptor* frame_state_descriptor = NULL;
  if (descriptor->NeedsFrameState()) {
    frame_state_descriptor =
        GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
  }

  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);

  // Compute InstructionOperands for inputs and outputs.
  // TODO(turbofan): on ARM64 it's probably better to use the code object in a
  // register if there are multiple uses of it. Improve constant pool and the
  // heuristics in the register allocator for where to emit constants.
  InitializeCallBuffer(call, &buffer, true, false);

  // TODO(dcarney): might be possible to use claim/poke instead
  // Push any stack arguments.
  for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
       input != buffer.pushed_nodes.rend(); input++) {
    Emit(kArmPush, NULL, g.UseRegister(*input));
  }

  // Select the appropriate opcode based on the call type.
  InstructionCode opcode;
  switch (descriptor->kind()) {
    case CallDescriptor::kCallCodeObject: {
      opcode = kArchCallCodeObject;
      break;
    }
    case CallDescriptor::kCallJSFunction:
      opcode = kArchCallJSFunction;
      break;
    default:
      UNREACHABLE();
      return;
  }
  opcode |= MiscField::encode(descriptor->flags());

  // Emit the call instruction.
  Instruction* call_instr =
      Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
           buffer.instruction_args.size(), &buffer.instruction_args.front());

  call_instr->MarkAsCall();
  if (deoptimization != NULL) {
    DCHECK(continuation != NULL);
    call_instr->MarkAsControl();
  }
}


void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  VisitBinop(this, node, kArmAdd, kArmAdd, cont);
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  VisitBinop(this, node, kArmSub, kArmRsb, cont);
}


// Shared routine for multiple compare operations.
static void VisitWordCompare(InstructionSelector* selector, Node* node,
                             InstructionCode opcode, FlagsContinuation* cont,
                             bool commutative) {
  ArmOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand* inputs[5];
  size_t input_count = 0;
  InstructionOperand* outputs[1];
  size_t output_count = 0;

  if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
                               &input_count, &inputs[1])) {
    inputs[0] = g.UseRegister(m.left().node());
    input_count++;
  } else if (TryMatchImmediateOrShift(selector, &opcode, m.left().node(),
                                      &input_count, &inputs[1])) {
    if (!commutative) cont->Commute();
    inputs[0] = g.UseRegister(m.right().node());
    input_count++;
  } else {
    opcode |= AddressingModeField::encode(kMode_Operand2_R);
    inputs[input_count++] = g.UseRegister(m.left().node());
    inputs[input_count++] = g.UseRegister(m.right().node());
  }

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  } else {
    DCHECK(cont->IsSet());
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0, input_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
                                      outputs, input_count, inputs);
  if (cont->IsBranch()) instr->MarkAsControl();
}

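// Tests a value against zero. Where possible the flag-setting form of the
// operation that produced the value is reused; the fallback is TST with the
// value as both operands.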
void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
  switch (node->opcode()) {
    case IrOpcode::kInt32Add:
      return VisitWordCompare(this, node, kArmCmn, cont, true);
    case IrOpcode::kInt32Sub:
      return VisitWordCompare(this, node, kArmCmp, cont, false);
    case IrOpcode::kWord32And:
      return VisitWordCompare(this, node, kArmTst, cont, true);
    case IrOpcode::kWord32Or:
      return VisitBinop(this, node, kArmOrr, kArmOrr, cont);
    case IrOpcode::kWord32Xor:
      return VisitWordCompare(this, node, kArmTeq, cont, true);
    case IrOpcode::kWord32Sar:
      return VisitShift(this, node, TryMatchASR, cont);
    case IrOpcode::kWord32Shl:
      return VisitShift(this, node, TryMatchLSL, cont);
    case IrOpcode::kWord32Shr:
      return VisitShift(this, node, TryMatchLSR, cont);
    case IrOpcode::kWord32Ror:
      return VisitShift(this, node, TryMatchROR, cont);
    default:
      break;
  }

  ArmOperandGenerator g(this);
  InstructionCode opcode =
      cont->Encode(kArmTst) | AddressingModeField::encode(kMode_Operand2_R);
  if (cont->IsBranch()) {
    Emit(opcode, NULL, g.UseRegister(node), g.UseRegister(node),
         g.Label(cont->true_block()),
         g.Label(cont->false_block()))->MarkAsControl();
  } else {
    Emit(opcode, g.DefineAsRegister(cont->result()), g.UseRegister(node),
         g.UseRegister(node));
  }
}


void InstructionSelector::VisitWord32Compare(Node* node,
                                             FlagsContinuation* cont) {
  VisitWordCompare(this, node, kArmCmp, cont, false);
}


void InstructionSelector::VisitFloat64Compare(Node* node,
                                              FlagsContinuation* cont) {
  ArmOperandGenerator g(this);
  Float64BinopMatcher m(node);
  if (cont->IsBranch()) {
    Emit(cont->Encode(kArmVcmpF64), NULL, g.UseRegister(m.left().node()),
         g.UseRegister(m.right().node()), g.Label(cont->true_block()),
         g.Label(cont->false_block()))->MarkAsControl();
  } else {
    DCHECK(cont->IsSet());
    Emit(cont->Encode(kArmVcmpF64), g.DefineAsRegister(cont->result()),
         g.UseRegister(m.left().node()), g.UseRegister(m.right().node()));
  }
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8