macro-assembler-arm64-inl.h
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
6 #define V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
7 
8 #include <ctype.h>
9 
10 #include "src/globals.h"
11 
16 #include "src/base/bits.h"
17 
18 
19 namespace v8 {
20 namespace internal {
21 
22 
23 MemOperand FieldMemOperand(Register object, int offset) {
24  return MemOperand(object, offset - kHeapObjectTag);
25 }
26 
27 
28 MemOperand UntagSmiFieldMemOperand(Register object, int offset) {
29  return UntagSmiMemOperand(object, offset - kHeapObjectTag);
30 }
31 
32 
33 MemOperand UntagSmiMemOperand(Register object, int offset) {
34  // Assumes that Smis are shifted by 32 bits and little endianness.
35  STATIC_ASSERT(kSmiShift == 32);
36  return MemOperand(object, offset + (kSmiShift / kBitsPerByte));
37 }
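// Illustrative note (assumption, not part of the original file): with
// kSmiShift == 32 the smi payload sits in the upper 32 bits of the tagged
// word, so for a field at byte offset F the payload can be read with a
// 32-bit load from (F - kHeapObjectTag + kSmiShift / kBitsPerByte), which is
// exactly the address UntagSmiFieldMemOperand(object, F) describes.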
38 
39 
40 Handle<Object> MacroAssembler::CodeObject() {
41  DCHECK(!code_object_.is_null());
42  return code_object_;
43 }
44 
45 
46 void MacroAssembler::And(const Register& rd,
47  const Register& rn,
48  const Operand& operand) {
49  DCHECK(allow_macro_instructions_);
50  DCHECK(!rd.IsZero());
51  LogicalMacro(rd, rn, operand, AND);
52 }
53 
54 
55 void MacroAssembler::Ands(const Register& rd,
56  const Register& rn,
57  const Operand& operand) {
58  DCHECK(allow_macro_instructions_);
59  DCHECK(!rd.IsZero());
60  LogicalMacro(rd, rn, operand, ANDS);
61 }
62 
63 
64 void MacroAssembler::Tst(const Register& rn,
65  const Operand& operand) {
66  DCHECK(allow_macro_instructions_);
67  LogicalMacro(AppropriateZeroRegFor(rn), rn, operand, ANDS);
68 }
69 
70 
71 void MacroAssembler::Bic(const Register& rd,
72  const Register& rn,
73  const Operand& operand) {
74  DCHECK(allow_macro_instructions_);
75  DCHECK(!rd.IsZero());
76  LogicalMacro(rd, rn, operand, BIC);
77 }
78 
79 
80 void MacroAssembler::Bics(const Register& rd,
81  const Register& rn,
82  const Operand& operand) {
83  DCHECK(allow_macro_instructions_);
84  DCHECK(!rd.IsZero());
85  LogicalMacro(rd, rn, operand, BICS);
86 }
87 
88 
89 void MacroAssembler::Orr(const Register& rd,
90  const Register& rn,
91  const Operand& operand) {
92  DCHECK(allow_macro_instructions_);
93  DCHECK(!rd.IsZero());
94  LogicalMacro(rd, rn, operand, ORR);
95 }
96 
97 
98 void MacroAssembler::Orn(const Register& rd,
99  const Register& rn,
100  const Operand& operand) {
101  DCHECK(allow_macro_instructions_);
102  DCHECK(!rd.IsZero());
103  LogicalMacro(rd, rn, operand, ORN);
104 }
105 
106 
107 void MacroAssembler::Eor(const Register& rd,
108  const Register& rn,
109  const Operand& operand) {
110  DCHECK(allow_macro_instructions_);
111  DCHECK(!rd.IsZero());
112  LogicalMacro(rd, rn, operand, EOR);
113 }
114 
115 
116 void MacroAssembler::Eon(const Register& rd,
117  const Register& rn,
118  const Operand& operand) {
119  DCHECK(allow_macro_instructions_);
120  DCHECK(!rd.IsZero());
121  LogicalMacro(rd, rn, operand, EON);
122 }
123 
124 
125 void MacroAssembler::Ccmp(const Register& rn,
126  const Operand& operand,
127  StatusFlags nzcv,
128  Condition cond) {
129  DCHECK(allow_macro_instructions_);
130  if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
131  ConditionalCompareMacro(rn, -operand.ImmediateValue(), nzcv, cond, CCMN);
132  } else {
133  ConditionalCompareMacro(rn, operand, nzcv, cond, CCMP);
134  }
135 }
136 
137 
138 void MacroAssembler::Ccmn(const Register& rn,
139  const Operand& operand,
140  StatusFlags nzcv,
141  Condition cond) {
142  DCHECK(allow_macro_instructions_);
143  if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
144  ConditionalCompareMacro(rn, -operand.ImmediateValue(), nzcv, cond, CCMP);
145  } else {
146  ConditionalCompareMacro(rn, operand, nzcv, cond, CCMN);
147  }
148 }
149 
150 
151 void MacroAssembler::Add(const Register& rd,
152  const Register& rn,
153  const Operand& operand) {
154  DCHECK(allow_macro_instructions_);
155  if (operand.IsImmediate() && (operand.ImmediateValue() < 0) &&
156  IsImmAddSub(-operand.ImmediateValue())) {
157  AddSubMacro(rd, rn, -operand.ImmediateValue(), LeaveFlags, SUB);
158  } else {
159  AddSubMacro(rd, rn, operand, LeaveFlags, ADD);
160  }
161 }
162 
163 void MacroAssembler::Adds(const Register& rd,
164  const Register& rn,
165  const Operand& operand) {
166  DCHECK(allow_macro_instructions_);
167  if (operand.IsImmediate() && (operand.ImmediateValue() < 0) &&
168  IsImmAddSub(-operand.ImmediateValue())) {
169  AddSubMacro(rd, rn, -operand.ImmediateValue(), SetFlags, SUB);
170  } else {
171  AddSubMacro(rd, rn, operand, SetFlags, ADD);
172  }
173 }
174 
175 
176 void MacroAssembler::Sub(const Register& rd,
177  const Register& rn,
178  const Operand& operand) {
179  DCHECK(allow_macro_instructions_);
180  if (operand.IsImmediate() && (operand.ImmediateValue() < 0) &&
181  IsImmAddSub(-operand.ImmediateValue())) {
182  AddSubMacro(rd, rn, -operand.ImmediateValue(), LeaveFlags, ADD);
183  } else {
184  AddSubMacro(rd, rn, operand, LeaveFlags, SUB);
185  }
186 }
187 
188 
189 void MacroAssembler::Subs(const Register& rd,
190  const Register& rn,
191  const Operand& operand) {
192  DCHECK(allow_macro_instructions_);
193  if (operand.IsImmediate() && (operand.ImmediateValue() < 0) &&
194  IsImmAddSub(-operand.ImmediateValue())) {
195  AddSubMacro(rd, rn, -operand.ImmediateValue(), SetFlags, ADD);
196  } else {
197  AddSubMacro(rd, rn, operand, SetFlags, SUB);
198  }
199 }
200 
201 
202 void MacroAssembler::Cmn(const Register& rn, const Operand& operand) {
203  DCHECK(allow_macro_instructions_);
204  Adds(AppropriateZeroRegFor(rn), rn, operand);
205 }
206 
207 
208 void MacroAssembler::Cmp(const Register& rn, const Operand& operand) {
209  DCHECK(allow_macro_instructions_);
210  Subs(AppropriateZeroRegFor(rn), rn, operand);
211 }
212 
213 
214 void MacroAssembler::Neg(const Register& rd,
215  const Operand& operand) {
216  DCHECK(allow_macro_instructions_);
217  DCHECK(!rd.IsZero());
218  if (operand.IsImmediate()) {
219  Mov(rd, -operand.ImmediateValue());
220  } else {
221  Sub(rd, AppropriateZeroRegFor(rd), operand);
222  }
223 }
224 
225 
226 void MacroAssembler::Negs(const Register& rd,
227  const Operand& operand) {
228  DCHECK(allow_macro_instructions_);
229  Subs(rd, AppropriateZeroRegFor(rd), operand);
230 }
231 
232 
233 void MacroAssembler::Adc(const Register& rd,
234  const Register& rn,
235  const Operand& operand) {
236  DCHECK(allow_macro_instructions_);
237  DCHECK(!rd.IsZero());
238  AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC);
239 }
240 
241 
242 void MacroAssembler::Adcs(const Register& rd,
243  const Register& rn,
244  const Operand& operand) {
245  DCHECK(allow_macro_instructions_);
246  DCHECK(!rd.IsZero());
247  AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC);
248 }
249 
250 
251 void MacroAssembler::Sbc(const Register& rd,
252  const Register& rn,
253  const Operand& operand) {
254  DCHECK(allow_macro_instructions_);
255  DCHECK(!rd.IsZero());
256  AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC);
257 }
258 
259 
260 void MacroAssembler::Sbcs(const Register& rd,
261  const Register& rn,
262  const Operand& operand) {
263  DCHECK(allow_macro_instructions_);
264  DCHECK(!rd.IsZero());
265  AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC);
266 }
267 
268 
269 void MacroAssembler::Ngc(const Register& rd,
270  const Operand& operand) {
271  DCHECK(allow_macro_instructions_);
272  DCHECK(!rd.IsZero());
273  Register zr = AppropriateZeroRegFor(rd);
274  Sbc(rd, zr, operand);
275 }
276 
277 
278 void MacroAssembler::Ngcs(const Register& rd,
279  const Operand& operand) {
280  DCHECK(allow_macro_instructions_);
281  DCHECK(!rd.IsZero());
282  Register zr = AppropriateZeroRegFor(rd);
283  Sbcs(rd, zr, operand);
284 }
285 
286 
287 void MacroAssembler::Mvn(const Register& rd, uint64_t imm) {
288  DCHECK(allow_macro_instructions_);
289  DCHECK(!rd.IsZero());
290  Mov(rd, ~imm);
291 }
292 
293 
294 #define DEFINE_FUNCTION(FN, REGTYPE, REG, OP) \
295 void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \
296  DCHECK(allow_macro_instructions_); \
297  LoadStoreMacro(REG, addr, OP); \
298 }
299 LS_MACRO_LIST(DEFINE_FUNCTION)
300 #undef DEFINE_FUNCTION
301 
302 
303 #define DEFINE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
304  void MacroAssembler::FN(const REGTYPE REG, const REGTYPE REG2, \
305  const MemOperand& addr) { \
306  DCHECK(allow_macro_instructions_); \
307  LoadStorePairMacro(REG, REG2, addr, OP); \
308  }
309 LSPAIR_MACRO_LIST(DEFINE_FUNCTION)
310 #undef DEFINE_FUNCTION
311 
312 
313 void MacroAssembler::Asr(const Register& rd,
314  const Register& rn,
315  unsigned shift) {
316  DCHECK(allow_macro_instructions_);
317  DCHECK(!rd.IsZero());
318  asr(rd, rn, shift);
319 }
320 
321 
322 void MacroAssembler::Asr(const Register& rd,
323  const Register& rn,
324  const Register& rm) {
325  DCHECK(allow_macro_instructions_);
326  DCHECK(!rd.IsZero());
327  asrv(rd, rn, rm);
328 }
329 
330 
331 void MacroAssembler::B(Label* label) {
332  b(label);
333  CheckVeneerPool(false, false);
334 }
335 
336 
337 void MacroAssembler::B(Condition cond, Label* label) {
338  DCHECK(allow_macro_instructions_);
339  B(label, cond);
340 }
341 
342 
343 void MacroAssembler::Bfi(const Register& rd,
344  const Register& rn,
345  unsigned lsb,
346  unsigned width) {
347  DCHECK(allow_macro_instructions_);
348  DCHECK(!rd.IsZero());
349  bfi(rd, rn, lsb, width);
350 }
351 
352 
353 void MacroAssembler::Bfxil(const Register& rd,
354  const Register& rn,
355  unsigned lsb,
356  unsigned width) {
357  DCHECK(allow_macro_instructions_);
358  DCHECK(!rd.IsZero());
359  bfxil(rd, rn, lsb, width);
360 }
361 
362 
363 void MacroAssembler::Bind(Label* label) {
364  DCHECK(allow_macro_instructions_);
365  bind(label);
366 }
367 
368 
369 void MacroAssembler::Bl(Label* label) {
370  DCHECK(allow_macro_instructions_);
371  bl(label);
372 }
373 
374 
375 void MacroAssembler::Blr(const Register& xn) {
376  DCHECK(allow_macro_instructions_);
377  DCHECK(!xn.IsZero());
378  blr(xn);
379 }
380 
381 
382 void MacroAssembler::Br(const Register& xn) {
383  DCHECK(allow_macro_instructions_);
384  DCHECK(!xn.IsZero());
385  br(xn);
386 }
387 
388 
389 void MacroAssembler::Brk(int code) {
390  DCHECK(allow_macro_instructions_);
391  brk(code);
392 }
393 
394 
395 void MacroAssembler::Cinc(const Register& rd,
396  const Register& rn,
397  Condition cond) {
398  DCHECK(allow_macro_instructions_);
399  DCHECK(!rd.IsZero());
400  DCHECK((cond != al) && (cond != nv));
401  cinc(rd, rn, cond);
402 }
403 
404 
405 void MacroAssembler::Cinv(const Register& rd,
406  const Register& rn,
407  Condition cond) {
408  DCHECK(allow_macro_instructions_);
409  DCHECK(!rd.IsZero());
410  DCHECK((cond != al) && (cond != nv));
411  cinv(rd, rn, cond);
412 }
413 
414 
415 void MacroAssembler::Cls(const Register& rd, const Register& rn) {
416  DCHECK(allow_macro_instructions_);
417  DCHECK(!rd.IsZero());
418  cls(rd, rn);
419 }
420 
421 
422 void MacroAssembler::Clz(const Register& rd, const Register& rn) {
423  DCHECK(allow_macro_instructions_);
424  DCHECK(!rd.IsZero());
425  clz(rd, rn);
426 }
427 
428 
429 void MacroAssembler::Cneg(const Register& rd,
430  const Register& rn,
431  Condition cond) {
432  DCHECK(allow_macro_instructions_);
433  DCHECK(!rd.IsZero());
434  DCHECK((cond != al) && (cond != nv));
435  cneg(rd, rn, cond);
436 }
437 
438 
439 // Conditionally zero the destination register. Only X registers are supported
440 // due to the truncation side-effect when used on W registers.
441 void MacroAssembler::CzeroX(const Register& rd,
442  Condition cond) {
443  DCHECK(allow_macro_instructions_);
444  DCHECK(!rd.IsSP() && rd.Is64Bits());
445  DCHECK((cond != al) && (cond != nv));
446  csel(rd, xzr, rd, cond);
447 }
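// Usage sketch (illustrative only, registers and condition are hypothetical):
//   Cmp(x1, 0);
//   CzeroX(x0, eq);   // x0 = (x1 == 0) ? 0 : x0, with no branch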
448 
449 
450 // Conditionally move a value into the destination register. Only X registers
451 // are supported due to the truncation side-effect when used on W registers.
452 void MacroAssembler::CmovX(const Register& rd,
453  const Register& rn,
454  Condition cond) {
455  DCHECK(allow_macro_instructions_);
456  DCHECK(!rd.IsSP());
457  DCHECK(rd.Is64Bits() && rn.Is64Bits());
458  DCHECK((cond != al) && (cond != nv));
459  if (!rd.is(rn)) {
460  csel(rd, rn, rd, cond);
461  }
462 }
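// Usage sketch (illustrative only, registers and condition are hypothetical):
//   Cmp(x2, x3);
//   CmovX(x0, x1, lt);   // x0 = (x2 < x3) ? x1 : x0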
463 
464 
465 void MacroAssembler::Cset(const Register& rd, Condition cond) {
466  DCHECK(allow_macro_instructions_);
467  DCHECK(!rd.IsZero());
468  DCHECK((cond != al) && (cond != nv));
469  cset(rd, cond);
470 }
471 
472 
473 void MacroAssembler::Csetm(const Register& rd, Condition cond) {
474  DCHECK(allow_macro_instructions_);
475  DCHECK(!rd.IsZero());
476  DCHECK((cond != al) && (cond != nv));
477  csetm(rd, cond);
478 }
479 
480 
481 void MacroAssembler::Csinc(const Register& rd,
482  const Register& rn,
483  const Register& rm,
484  Condition cond) {
485  DCHECK(allow_macro_instructions_);
486  DCHECK(!rd.IsZero());
487  DCHECK((cond != al) && (cond != nv));
488  csinc(rd, rn, rm, cond);
489 }
490 
491 
492 void MacroAssembler::Csinv(const Register& rd,
493  const Register& rn,
494  const Register& rm,
495  Condition cond) {
496  DCHECK(allow_macro_instructions_);
497  DCHECK(!rd.IsZero());
498  DCHECK((cond != al) && (cond != nv));
499  csinv(rd, rn, rm, cond);
500 }
501 
502 
503 void MacroAssembler::Csneg(const Register& rd,
504  const Register& rn,
505  const Register& rm,
506  Condition cond) {
507  DCHECK(allow_macro_instructions_);
508  DCHECK(!rd.IsZero());
509  DCHECK((cond != al) && (cond != nv));
510  csneg(rd, rn, rm, cond);
511 }
512 
513 
514 void MacroAssembler::Dmb(BarrierDomain domain, BarrierType type) {
515  DCHECK(allow_macro_instructions_);
516  dmb(domain, type);
517 }
518 
519 
520 void MacroAssembler::Dsb(BarrierDomain domain, BarrierType type) {
521  DCHECK(allow_macro_instructions_);
522  dsb(domain, type);
523 }
524 
525 
526 void MacroAssembler::Debug(const char* message, uint32_t code, Instr params) {
527  DCHECK(allow_macro_instructions_);
528  debug(message, code, params);
529 }
530 
531 
532 void MacroAssembler::Extr(const Register& rd,
533  const Register& rn,
534  const Register& rm,
535  unsigned lsb) {
536  DCHECK(allow_macro_instructions_);
537  DCHECK(!rd.IsZero());
538  extr(rd, rn, rm, lsb);
539 }
540 
541 
542 void MacroAssembler::Fabs(const FPRegister& fd, const FPRegister& fn) {
543  DCHECK(allow_macro_instructions_);
544  fabs(fd, fn);
545 }
546 
547 
548 void MacroAssembler::Fadd(const FPRegister& fd,
549  const FPRegister& fn,
550  const FPRegister& fm) {
551  DCHECK(allow_macro_instructions_);
552  fadd(fd, fn, fm);
553 }
554 
555 
556 void MacroAssembler::Fccmp(const FPRegister& fn,
557  const FPRegister& fm,
558  StatusFlags nzcv,
559  Condition cond) {
560  DCHECK(allow_macro_instructions_);
561  DCHECK((cond != al) && (cond != nv));
562  fccmp(fn, fm, nzcv, cond);
563 }
564 
565 
566 void MacroAssembler::Fcmp(const FPRegister& fn, const FPRegister& fm) {
567  DCHECK(allow_macro_instructions_);
568  fcmp(fn, fm);
569 }
570 
571 
572 void MacroAssembler::Fcmp(const FPRegister& fn, double value) {
573  DCHECK(allow_macro_instructions_);
574  if (value != 0.0) {
575  UseScratchRegisterScope temps(this);
576  FPRegister tmp = temps.AcquireSameSizeAs(fn);
577  Fmov(tmp, value);
578  fcmp(fn, tmp);
579  } else {
580  fcmp(fn, value);
581  }
582 }
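// Consequence of the check above (illustrative only, register names hypothetical):
//   Fcmp(d0, 0.0);   // encodes a single fcmp-with-zero instruction
//   Fcmp(d0, 1.5);   // materialises 1.5 in a scratch FP register first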
583 
584 
585 void MacroAssembler::Fcsel(const FPRegister& fd,
586  const FPRegister& fn,
587  const FPRegister& fm,
588  Condition cond) {
589  DCHECK(allow_macro_instructions_);
590  DCHECK((cond != al) && (cond != nv));
591  fcsel(fd, fn, fm, cond);
592 }
593 
594 
595 void MacroAssembler::Fcvt(const FPRegister& fd, const FPRegister& fn) {
596  DCHECK(allow_macro_instructions_);
597  fcvt(fd, fn);
598 }
599 
600 
601 void MacroAssembler::Fcvtas(const Register& rd, const FPRegister& fn) {
602  DCHECK(allow_macro_instructions_);
603  DCHECK(!rd.IsZero());
604  fcvtas(rd, fn);
605 }
606 
607 
608 void MacroAssembler::Fcvtau(const Register& rd, const FPRegister& fn) {
609  DCHECK(allow_macro_instructions_);
610  DCHECK(!rd.IsZero());
611  fcvtau(rd, fn);
612 }
613 
614 
615 void MacroAssembler::Fcvtms(const Register& rd, const FPRegister& fn) {
616  DCHECK(allow_macro_instructions_);
617  DCHECK(!rd.IsZero());
618  fcvtms(rd, fn);
619 }
620 
621 
622 void MacroAssembler::Fcvtmu(const Register& rd, const FPRegister& fn) {
623  DCHECK(allow_macro_instructions_);
624  DCHECK(!rd.IsZero());
625  fcvtmu(rd, fn);
626 }
627 
628 
629 void MacroAssembler::Fcvtns(const Register& rd, const FPRegister& fn) {
630  DCHECK(allow_macro_instructions_);
631  DCHECK(!rd.IsZero());
632  fcvtns(rd, fn);
633 }
634 
635 
636 void MacroAssembler::Fcvtnu(const Register& rd, const FPRegister& fn) {
637  DCHECK(allow_macro_instructions_);
638  DCHECK(!rd.IsZero());
639  fcvtnu(rd, fn);
640 }
641 
642 
643 void MacroAssembler::Fcvtzs(const Register& rd, const FPRegister& fn) {
644  DCHECK(allow_macro_instructions_);
645  DCHECK(!rd.IsZero());
646  fcvtzs(rd, fn);
647 }
648 void MacroAssembler::Fcvtzu(const Register& rd, const FPRegister& fn) {
649  DCHECK(allow_macro_instructions_);
650  DCHECK(!rd.IsZero());
651  fcvtzu(rd, fn);
652 }
653 
654 
655 void MacroAssembler::Fdiv(const FPRegister& fd,
656  const FPRegister& fn,
657  const FPRegister& fm) {
658  DCHECK(allow_macro_instructions_);
659  fdiv(fd, fn, fm);
660 }
661 
662 
663 void MacroAssembler::Fmadd(const FPRegister& fd,
664  const FPRegister& fn,
665  const FPRegister& fm,
666  const FPRegister& fa) {
667  DCHECK(allow_macro_instructions_);
668  fmadd(fd, fn, fm, fa);
669 }
670 
671 
672 void MacroAssembler::Fmax(const FPRegister& fd,
673  const FPRegister& fn,
674  const FPRegister& fm) {
675  DCHECK(allow_macro_instructions_);
676  fmax(fd, fn, fm);
677 }
678 
679 
680 void MacroAssembler::Fmaxnm(const FPRegister& fd,
681  const FPRegister& fn,
682  const FPRegister& fm) {
683  DCHECK(allow_macro_instructions_);
684  fmaxnm(fd, fn, fm);
685 }
686 
687 
688 void MacroAssembler::Fmin(const FPRegister& fd,
689  const FPRegister& fn,
690  const FPRegister& fm) {
691  DCHECK(allow_macro_instructions_);
692  fmin(fd, fn, fm);
693 }
694 
695 
696 void MacroAssembler::Fminnm(const FPRegister& fd,
697  const FPRegister& fn,
698  const FPRegister& fm) {
699  DCHECK(allow_macro_instructions_);
700  fminnm(fd, fn, fm);
701 }
702 
703 
704 void MacroAssembler::Fmov(FPRegister fd, FPRegister fn) {
705  DCHECK(allow_macro_instructions_);
706  // Only emit an instruction if fd and fn are different, and they are both D
707  // registers. fmov(s0, s0) is not a no-op because it clears the top word of
708  // d0. Technically, fmov(d0, d0) is not a no-op either because it clears the
709  // top of q0, but FPRegister does not currently support Q registers.
710  if (!fd.Is(fn) || !fd.Is64Bits()) {
711  fmov(fd, fn);
712  }
713 }
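// Illustrative consequence of the check above (not in the original source):
//   Fmov(d0, d0);   // elided: a 64-bit self-move changes nothing
//   Fmov(s0, s0);   // emitted: it clears the upper 32 bits of d0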
714 
715 
716 void MacroAssembler::Fmov(FPRegister fd, Register rn) {
717  DCHECK(allow_macro_instructions_);
718  fmov(fd, rn);
719 }
720 
721 
722 void MacroAssembler::Fmov(FPRegister fd, double imm) {
723  DCHECK(allow_macro_instructions_);
724  if (fd.Is32Bits()) {
725  Fmov(fd, static_cast<float>(imm));
726  return;
727  }
728 
729  DCHECK(fd.Is64Bits());
730  if (IsImmFP64(imm)) {
731  fmov(fd, imm);
732  } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
733  fmov(fd, xzr);
734  } else {
735  Ldr(fd, imm);
736  }
737 }
738 
739 
740 void MacroAssembler::Fmov(FPRegister fd, float imm) {
741  DCHECK(allow_macro_instructions_);
742  if (fd.Is64Bits()) {
743  Fmov(fd, static_cast<double>(imm));
744  return;
745  }
746 
747  DCHECK(fd.Is32Bits());
748  if (IsImmFP32(imm)) {
749  fmov(fd, imm);
750  } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
751  fmov(fd, wzr);
752  } else {
753  UseScratchRegisterScope temps(this);
754  Register tmp = temps.AcquireW();
755  // TODO(all): Use Assembler::ldr(const FPRegister& ft, float imm).
756  Mov(tmp, float_to_rawbits(imm));
757  Fmov(fd, tmp);
758  }
759 }
760 
761 
762 void MacroAssembler::Fmov(Register rd, FPRegister fn) {
763  DCHECK(allow_macro_instructions_);
764  DCHECK(!rd.IsZero());
765  fmov(rd, fn);
766 }
767 
768 
769 void MacroAssembler::Fmsub(const FPRegister& fd,
770  const FPRegister& fn,
771  const FPRegister& fm,
772  const FPRegister& fa) {
773  DCHECK(allow_macro_instructions_);
774  fmsub(fd, fn, fm, fa);
775 }
776 
777 
778 void MacroAssembler::Fmul(const FPRegister& fd,
779  const FPRegister& fn,
780  const FPRegister& fm) {
781  DCHECK(allow_macro_instructions_);
782  fmul(fd, fn, fm);
783 }
784 
785 
786 void MacroAssembler::Fneg(const FPRegister& fd, const FPRegister& fn) {
787  DCHECK(allow_macro_instructions_);
788  fneg(fd, fn);
789 }
790 
791 
792 void MacroAssembler::Fnmadd(const FPRegister& fd,
793  const FPRegister& fn,
794  const FPRegister& fm,
795  const FPRegister& fa) {
796  DCHECK(allow_macro_instructions_);
797  fnmadd(fd, fn, fm, fa);
798 }
799 
800 
801 void MacroAssembler::Fnmsub(const FPRegister& fd,
802  const FPRegister& fn,
803  const FPRegister& fm,
804  const FPRegister& fa) {
805  DCHECK(allow_macro_instructions_);
806  fnmsub(fd, fn, fm, fa);
807 }
808 
809 
810 void MacroAssembler::Frinta(const FPRegister& fd, const FPRegister& fn) {
811  DCHECK(allow_macro_instructions_);
812  frinta(fd, fn);
813 }
814 
815 
816 void MacroAssembler::Frintm(const FPRegister& fd, const FPRegister& fn) {
817  DCHECK(allow_macro_instructions_);
818  frintm(fd, fn);
819 }
820 
821 
822 void MacroAssembler::Frintn(const FPRegister& fd, const FPRegister& fn) {
823  DCHECK(allow_macro_instructions_);
824  frintn(fd, fn);
825 }
826 
827 
828 void MacroAssembler::Frintz(const FPRegister& fd, const FPRegister& fn) {
829  DCHECK(allow_macro_instructions_);
830  frintz(fd, fn);
831 }
832 
833 
834 void MacroAssembler::Fsqrt(const FPRegister& fd, const FPRegister& fn) {
835  DCHECK(allow_macro_instructions_);
836  fsqrt(fd, fn);
837 }
838 
839 
840 void MacroAssembler::Fsub(const FPRegister& fd,
841  const FPRegister& fn,
842  const FPRegister& fm) {
843  DCHECK(allow_macro_instructions_);
844  fsub(fd, fn, fm);
845 }
846 
847 
848 void MacroAssembler::Hint(SystemHint code) {
849  DCHECK(allow_macro_instructions_);
850  hint(code);
851 }
852 
853 
854 void MacroAssembler::Hlt(int code) {
855  DCHECK(allow_macro_instructions_);
856  hlt(code);
857 }
858 
859 
860 void MacroAssembler::Isb() {
861  DCHECK(allow_macro_instructions_);
862  isb();
863 }
864 
865 
866 void MacroAssembler::Ldnp(const CPURegister& rt,
867  const CPURegister& rt2,
868  const MemOperand& src) {
869  DCHECK(allow_macro_instructions_);
870  DCHECK(!AreAliased(rt, rt2));
871  ldnp(rt, rt2, src);
872 }
873 
874 
875 void MacroAssembler::Ldr(const CPURegister& rt, const Immediate& imm) {
876  DCHECK(allow_macro_instructions_);
877  ldr(rt, imm);
878 }
879 
880 
881 void MacroAssembler::Ldr(const CPURegister& rt, double imm) {
882  DCHECK(allow_macro_instructions_);
883  DCHECK(rt.Is64Bits());
884  ldr(rt, Immediate(double_to_rawbits(imm)));
885 }
886 
887 
888 void MacroAssembler::Lsl(const Register& rd,
889  const Register& rn,
890  unsigned shift) {
891  DCHECK(allow_macro_instructions_);
892  DCHECK(!rd.IsZero());
893  lsl(rd, rn, shift);
894 }
895 
896 
897 void MacroAssembler::Lsl(const Register& rd,
898  const Register& rn,
899  const Register& rm) {
900  DCHECK(allow_macro_instructions_);
901  DCHECK(!rd.IsZero());
902  lslv(rd, rn, rm);
903 }
904 
905 
906 void MacroAssembler::Lsr(const Register& rd,
907  const Register& rn,
908  unsigned shift) {
909  DCHECK(allow_macro_instructions_);
910  DCHECK(!rd.IsZero());
911  lsr(rd, rn, shift);
912 }
913 
914 
915 void MacroAssembler::Lsr(const Register& rd,
916  const Register& rn,
917  const Register& rm) {
918  DCHECK(allow_macro_instructions_);
919  DCHECK(!rd.IsZero());
920  lsrv(rd, rn, rm);
921 }
922 
923 
924 void MacroAssembler::Madd(const Register& rd,
925  const Register& rn,
926  const Register& rm,
927  const Register& ra) {
928  DCHECK(allow_macro_instructions_);
929  DCHECK(!rd.IsZero());
930  madd(rd, rn, rm, ra);
931 }
932 
933 
934 void MacroAssembler::Mneg(const Register& rd,
935  const Register& rn,
936  const Register& rm) {
937  DCHECK(allow_macro_instructions_);
938  DCHECK(!rd.IsZero());
939  mneg(rd, rn, rm);
940 }
941 
942 
943 void MacroAssembler::Mov(const Register& rd, const Register& rn) {
944  DCHECK(allow_macro_instructions_);
945  DCHECK(!rd.IsZero());
946  // Emit a register move only if the registers are distinct, or if they are
947  // not X registers. Note that mov(w0, w0) is not a no-op because it clears
948  // the top word of x0.
949  if (!rd.Is(rn) || !rd.Is64Bits()) {
950  Assembler::mov(rd, rn);
951  }
952 }
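// Illustrative consequence of the check above (not in the original source):
//   Mov(x0, x0);   // elided: an X-register self-move changes nothing
//   Mov(w0, w0);   // emitted: it clears the upper 32 bits of x0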
953 
954 
955 void MacroAssembler::Movk(const Register& rd, uint64_t imm, int shift) {
956  DCHECK(allow_macro_instructions_);
957  DCHECK(!rd.IsZero());
958  movk(rd, imm, shift);
959 }
960 
961 
962 void MacroAssembler::Mrs(const Register& rt, SystemRegister sysreg) {
963  DCHECK(allow_macro_instructions_);
964  DCHECK(!rt.IsZero());
965  mrs(rt, sysreg);
966 }
967 
968 
969 void MacroAssembler::Msr(SystemRegister sysreg, const Register& rt) {
970  DCHECK(allow_macro_instructions_);
971  msr(sysreg, rt);
972 }
973 
974 
975 void MacroAssembler::Msub(const Register& rd,
976  const Register& rn,
977  const Register& rm,
978  const Register& ra) {
979  DCHECK(allow_macro_instructions_);
980  DCHECK(!rd.IsZero());
981  msub(rd, rn, rm, ra);
982 }
983 
984 
985 void MacroAssembler::Mul(const Register& rd,
986  const Register& rn,
987  const Register& rm) {
988  DCHECK(allow_macro_instructions_);
989  DCHECK(!rd.IsZero());
990  mul(rd, rn, rm);
991 }
992 
993 
994 void MacroAssembler::Rbit(const Register& rd, const Register& rn) {
995  DCHECK(allow_macro_instructions_);
996  DCHECK(!rd.IsZero());
997  rbit(rd, rn);
998 }
999 
1000 
1001 void MacroAssembler::Ret(const Register& xn) {
1002  DCHECK(allow_macro_instructions_);
1003  DCHECK(!xn.IsZero());
1004  ret(xn);
1005  CheckVeneerPool(false, false);
1006 }
1007 
1008 
1009 void MacroAssembler::Rev(const Register& rd, const Register& rn) {
1010  DCHECK(allow_macro_instructions_);
1011  DCHECK(!rd.IsZero());
1012  rev(rd, rn);
1013 }
1014 
1015 
1016 void MacroAssembler::Rev16(const Register& rd, const Register& rn) {
1017  DCHECK(allow_macro_instructions_);
1018  DCHECK(!rd.IsZero());
1019  rev16(rd, rn);
1020 }
1021 
1022 
1023 void MacroAssembler::Rev32(const Register& rd, const Register& rn) {
1024  DCHECK(allow_macro_instructions_);
1025  DCHECK(!rd.IsZero());
1026  rev32(rd, rn);
1027 }
1028 
1029 
1030 void MacroAssembler::Ror(const Register& rd,
1031  const Register& rs,
1032  unsigned shift) {
1033  DCHECK(allow_macro_instructions_);
1034  DCHECK(!rd.IsZero());
1035  ror(rd, rs, shift);
1036 }
1037 
1038 
1039 void MacroAssembler::Ror(const Register& rd,
1040  const Register& rn,
1041  const Register& rm) {
1042  DCHECK(allow_macro_instructions_);
1043  DCHECK(!rd.IsZero());
1044  rorv(rd, rn, rm);
1045 }
1046 
1047 
1048 void MacroAssembler::Sbfiz(const Register& rd,
1049  const Register& rn,
1050  unsigned lsb,
1051  unsigned width) {
1052  DCHECK(allow_macro_instructions_);
1053  DCHECK(!rd.IsZero());
1054  sbfiz(rd, rn, lsb, width);
1055 }
1056 
1057 
1058 void MacroAssembler::Sbfx(const Register& rd,
1059  const Register& rn,
1060  unsigned lsb,
1061  unsigned width) {
1062  DCHECK(allow_macro_instructions_);
1063  DCHECK(!rd.IsZero());
1064  sbfx(rd, rn, lsb, width);
1065 }
1066 
1067 
1068 void MacroAssembler::Scvtf(const FPRegister& fd,
1069  const Register& rn,
1070  unsigned fbits) {
1071  DCHECK(allow_macro_instructions_);
1072  scvtf(fd, rn, fbits);
1073 }
1074 
1075 
1076 void MacroAssembler::Sdiv(const Register& rd,
1077  const Register& rn,
1078  const Register& rm) {
1079  DCHECK(allow_macro_instructions_);
1080  DCHECK(!rd.IsZero());
1081  sdiv(rd, rn, rm);
1082 }
1083 
1084 
1085 void MacroAssembler::Smaddl(const Register& rd,
1086  const Register& rn,
1087  const Register& rm,
1088  const Register& ra) {
1089  DCHECK(allow_macro_instructions_);
1090  DCHECK(!rd.IsZero());
1091  smaddl(rd, rn, rm, ra);
1092 }
1093 
1094 
1095 void MacroAssembler::Smsubl(const Register& rd,
1096  const Register& rn,
1097  const Register& rm,
1098  const Register& ra) {
1099  DCHECK(allow_macro_instructions_);
1100  DCHECK(!rd.IsZero());
1101  smsubl(rd, rn, rm, ra);
1102 }
1103 
1104 
1105 void MacroAssembler::Smull(const Register& rd,
1106  const Register& rn,
1107  const Register& rm) {
1108  DCHECK(allow_macro_instructions_);
1109  DCHECK(!rd.IsZero());
1110  smull(rd, rn, rm);
1111 }
1112 
1113 
1114 void MacroAssembler::Smulh(const Register& rd,
1115  const Register& rn,
1116  const Register& rm) {
1117  DCHECK(allow_macro_instructions_);
1118  DCHECK(!rd.IsZero());
1119  smulh(rd, rn, rm);
1120 }
1121 
1122 
1123 void MacroAssembler::Stnp(const CPURegister& rt,
1124  const CPURegister& rt2,
1125  const MemOperand& dst) {
1126  DCHECK(allow_macro_instructions_);
1127  stnp(rt, rt2, dst);
1128 }
1129 
1130 
1131 void MacroAssembler::Sxtb(const Register& rd, const Register& rn) {
1132  DCHECK(allow_macro_instructions_);
1133  DCHECK(!rd.IsZero());
1134  sxtb(rd, rn);
1135 }
1136 
1137 
1138 void MacroAssembler::Sxth(const Register& rd, const Register& rn) {
1139  DCHECK(allow_macro_instructions_);
1140  DCHECK(!rd.IsZero());
1141  sxth(rd, rn);
1142 }
1143 
1144 
1145 void MacroAssembler::Sxtw(const Register& rd, const Register& rn) {
1146  DCHECK(allow_macro_instructions_);
1147  DCHECK(!rd.IsZero());
1148  sxtw(rd, rn);
1149 }
1150 
1151 
1152 void MacroAssembler::Ubfiz(const Register& rd,
1153  const Register& rn,
1154  unsigned lsb,
1155  unsigned width) {
1156  DCHECK(allow_macro_instructions_);
1157  DCHECK(!rd.IsZero());
1158  ubfiz(rd, rn, lsb, width);
1159 }
1160 
1161 
1162 void MacroAssembler::Ubfx(const Register& rd,
1163  const Register& rn,
1164  unsigned lsb,
1165  unsigned width) {
1166  DCHECK(allow_macro_instructions_);
1167  DCHECK(!rd.IsZero());
1168  ubfx(rd, rn, lsb, width);
1169 }
1170 
1171 
1172 void MacroAssembler::Ucvtf(const FPRegister& fd,
1173  const Register& rn,
1174  unsigned fbits) {
1175  DCHECK(allow_macro_instructions_);
1176  ucvtf(fd, rn, fbits);
1177 }
1178 
1179 
1180 void MacroAssembler::Udiv(const Register& rd,
1181  const Register& rn,
1182  const Register& rm) {
1183  DCHECK(allow_macro_instructions_);
1184  DCHECK(!rd.IsZero());
1185  udiv(rd, rn, rm);
1186 }
1187 
1188 
1189 void MacroAssembler::Umaddl(const Register& rd,
1190  const Register& rn,
1191  const Register& rm,
1192  const Register& ra) {
1193  DCHECK(allow_macro_instructions_);
1194  DCHECK(!rd.IsZero());
1195  umaddl(rd, rn, rm, ra);
1196 }
1197 
1198 
1199 void MacroAssembler::Umsubl(const Register& rd,
1200  const Register& rn,
1201  const Register& rm,
1202  const Register& ra) {
1203  DCHECK(allow_macro_instructions_);
1204  DCHECK(!rd.IsZero());
1205  umsubl(rd, rn, rm, ra);
1206 }
1207 
1208 
1209 void MacroAssembler::Uxtb(const Register& rd, const Register& rn) {
1210  DCHECK(allow_macro_instructions_);
1211  DCHECK(!rd.IsZero());
1212  uxtb(rd, rn);
1213 }
1214 
1215 
1216 void MacroAssembler::Uxth(const Register& rd, const Register& rn) {
1217  DCHECK(allow_macro_instructions_);
1218  DCHECK(!rd.IsZero());
1219  uxth(rd, rn);
1220 }
1221 
1222 
1223 void MacroAssembler::Uxtw(const Register& rd, const Register& rn) {
1224  DCHECK(allow_macro_instructions_);
1225  DCHECK(!rd.IsZero());
1226  uxtw(rd, rn);
1227 }
1228 
1229 
1230 void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
1231  DCHECK(!csp.Is(sp_));
1232  if (!TmpList()->IsEmpty()) {
1233  if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
1234  UseScratchRegisterScope temps(this);
1235  Register temp = temps.AcquireX();
1236  Sub(temp, StackPointer(), space);
1237  Bic(csp, temp, 0xf);
1238  } else {
1239  Sub(csp, StackPointer(), space);
1240  }
1241  } else {
1242  // TODO(jbramley): Several callers rely on this not using scratch
1243  // registers, so we use the assembler directly here. However, this means
1244  // that large immediate values of 'space' cannot be handled cleanly. (Only
1245  // 24-bits immediates or values of 'space' that can be encoded in one
1246  // instruction are accepted.) Once we implement our flexible scratch
1247  // register idea, we could greatly simplify this function.
1248  InstructionAccurateScope scope(this);
1249  DCHECK(space.IsImmediate());
1250  // Align to 16 bytes.
1251  uint64_t imm = RoundUp(space.ImmediateValue(), 0x10);
1252  DCHECK(is_uint24(imm));
1253 
1254  Register source = StackPointer();
1255  if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
1256  bic(csp, source, 0xf);
1257  source = csp;
1258  }
1259  if (!is_uint12(imm)) {
1260  int64_t imm_top_12_bits = imm >> 12;
1261  sub(csp, source, imm_top_12_bits << 12);
1262  source = csp;
1263  imm -= imm_top_12_bits << 12;
1264  }
1265  if (imm > 0) {
1266  sub(csp, source, imm);
1267  }
1268  }
1269  AssertStackConsistency();
1270 }
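// Worked example for the immediate path above (illustrative only): for
// space == 0x12345 the value is rounded up to 0x12350 and split so that
//   sub(csp, source, 0x12000);   // top 12 bits of the aligned immediate
//   sub(csp, csp, 0x350);        // remaining low 12 bits
// cover any 24-bit byte count in at most two sub instructions.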
1271 
1272 
1273 void MacroAssembler::SyncSystemStackPointer() {
1274  DCHECK(emit_debug_code());
1275  DCHECK(!csp.Is(sp_));
1276  { InstructionAccurateScope scope(this);
1277  if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
1278  bic(csp, StackPointer(), 0xf);
1279  } else {
1280  mov(csp, StackPointer());
1281  }
1282  }
1283  AssertStackConsistency();
1284 }
1285 
1286 
1287 void MacroAssembler::InitializeRootRegister() {
1288  ExternalReference roots_array_start =
1289  ExternalReference::roots_array_start(isolate());
1290  Mov(root, Operand(roots_array_start));
1291 }
1292 
1293 
1294 void MacroAssembler::SmiTag(Register dst, Register src) {
1295  STATIC_ASSERT(kXRegSizeInBits ==
1296  static_cast<unsigned>(kSmiShift + kSmiValueSize));
1297  DCHECK(dst.Is64Bits() && src.Is64Bits());
1298  Lsl(dst, src, kSmiShift);
1299 }
1300 
1301 
1302 void MacroAssembler::SmiTag(Register smi) { SmiTag(smi, smi); }
1303 
1304 
1305 void MacroAssembler::SmiUntag(Register dst, Register src) {
1306  STATIC_ASSERT(kXRegSizeInBits ==
1307  static_cast<unsigned>(kSmiShift + kSmiValueSize));
1308  DCHECK(dst.Is64Bits() && src.Is64Bits());
1309  if (FLAG_enable_slow_asserts) {
1310  AssertSmi(src);
1311  }
1312  Asr(dst, src, kSmiShift);
1313 }
1314 
1315 
1316 void MacroAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); }
1317 
1318 
1319 void MacroAssembler::SmiUntagToDouble(FPRegister dst,
1320  Register src,
1321  UntagMode mode) {
1322  DCHECK(dst.Is64Bits() && src.Is64Bits());
1323  if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
1324  AssertSmi(src);
1325  }
1326  Scvtf(dst, src, kSmiShift);
1327 }
1328 
1329 
1330 void MacroAssembler::SmiUntagToFloat(FPRegister dst,
1331  Register src,
1332  UntagMode mode) {
1333  DCHECK(dst.Is32Bits() && src.Is64Bits());
1334  if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
1335  AssertSmi(src);
1336  }
1337  Scvtf(dst, src, kSmiShift);
1338 }
1339 
1340 
1341 void MacroAssembler::SmiTagAndPush(Register src) {
1342  STATIC_ASSERT((static_cast<unsigned>(kSmiShift) == kWRegSizeInBits) &&
1343  (static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits) &&
1344  (kSmiTag == 0));
1345  Push(src.W(), wzr);
1346 }
1347 
1348 
1349 void MacroAssembler::SmiTagAndPush(Register src1, Register src2) {
1350  STATIC_ASSERT((static_cast<unsigned>(kSmiShift) == kWRegSizeInBits) &&
1351  (static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits) &&
1352  (kSmiTag == 0));
1353  Push(src1.W(), wzr, src2.W(), wzr);
1354 }
1355 
1356 
1357 void MacroAssembler::JumpIfSmi(Register value,
1358  Label* smi_label,
1359  Label* not_smi_label) {
1360  STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
1361  // Check if the tag bit is set.
1362  if (smi_label) {
1363  Tbz(value, 0, smi_label);
1364  if (not_smi_label) {
1365  B(not_smi_label);
1366  }
1367  } else {
1368  DCHECK(not_smi_label);
1369  Tbnz(value, 0, not_smi_label);
1370  }
1371 }
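// Usage sketch (illustrative only, labels are hypothetical): a smi has a
// clear tag bit, so the dispatch is a single test-bit-and-branch:
//   JumpIfSmi(x0, &is_smi);              // tbz x0, #0, is_smi
//   JumpIfNotSmi(x0, &is_heap_object);   // tbnz x0, #0, is_heap_object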
1372 
1373 
1374 void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
1375  JumpIfSmi(value, NULL, not_smi_label);
1376 }
1377 
1378 
1379 void MacroAssembler::JumpIfBothSmi(Register value1,
1380  Register value2,
1381  Label* both_smi_label,
1382  Label* not_smi_label) {
1383  STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
1384  UseScratchRegisterScope temps(this);
1385  Register tmp = temps.AcquireX();
1386  // Check if both tag bits are clear.
1387  Orr(tmp, value1, value2);
1388  JumpIfSmi(tmp, both_smi_label, not_smi_label);
1389 }
1390 
1391 
1392 void MacroAssembler::JumpIfEitherSmi(Register value1,
1393  Register value2,
1394  Label* either_smi_label,
1395  Label* not_smi_label) {
1396  STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
1397  UseScratchRegisterScope temps(this);
1398  Register tmp = temps.AcquireX();
1399  // Check if either tag bit is clear.
1400  And(tmp, value1, value2);
1401  JumpIfSmi(tmp, either_smi_label, not_smi_label);
1402 }
1403 
1404 
1405 void MacroAssembler::JumpIfEitherNotSmi(Register value1,
1406  Register value2,
1407  Label* not_smi_label) {
1408  JumpIfBothSmi(value1, value2, NULL, not_smi_label);
1409 }
1410 
1411 
1412 void MacroAssembler::JumpIfBothNotSmi(Register value1,
1413  Register value2,
1414  Label* not_smi_label) {
1415  JumpIfEitherSmi(value1, value2, NULL, not_smi_label);
1416 }
1417 
1418 
1419 void MacroAssembler::ObjectTag(Register tagged_obj, Register obj) {
1420  STATIC_ASSERT(kHeapObjectTag == 1);
1421  if (emit_debug_code()) {
1422  Label ok;
1423  Tbz(obj, 0, &ok);
1424  Abort(kObjectTagged);
1425  Bind(&ok);
1426  }
1427  Orr(tagged_obj, obj, kHeapObjectTag);
1428 }
1429 
1430 
1431 void MacroAssembler::ObjectUntag(Register untagged_obj, Register obj) {
1432  STATIC_ASSERT(kHeapObjectTag == 1);
1433  if (emit_debug_code()) {
1434  Label ok;
1435  Tbnz(obj, 0, &ok);
1436  Abort(kObjectNotTagged);
1437  Bind(&ok);
1438  }
1439  Bic(untagged_obj, obj, kHeapObjectTag);
1440 }
1441 
1442 
1443 void MacroAssembler::IsObjectNameType(Register object,
1444  Register type,
1445  Label* fail) {
1446  CompareObjectType(object, type, type, LAST_NAME_TYPE);
1447  B(hi, fail);
1448 }
1449 
1450 
1451 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
1452  Register map,
1453  Register scratch,
1454  Label* fail) {
1455  Ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
1456  IsInstanceJSObjectType(map, scratch, fail);
1457 }
1458 
1459 
1460 void MacroAssembler::IsInstanceJSObjectType(Register map,
1461  Register scratch,
1462  Label* fail) {
1463  Ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
1464  // If cmp result is lt, the following ccmp will clear all flags.
1465  // Z == 0, N == V implies gt condition.
1466  Cmp(scratch, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
1467  Ccmp(scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE, NoFlag, ge);
1468 
1469  // If we didn't get a valid label object just fall through and leave the
1470  // flags updated.
1471  if (fail != NULL) {
1472  B(gt, fail);
1473  }
1474 }
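// Explanatory note (assumption, not in the original source): if the instance
// type is below the lower bound, the Cmp leaves lt, so the Ccmp (predicated
// on ge) writes its NoFlag immediate into NZCV, which satisfies gt; if the
// type is above the upper bound, the Ccmp's own comparison yields gt. Either
// way B(gt, fail) is taken exactly when the type is outside the range.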
1475 
1476 
1477 void MacroAssembler::IsObjectJSStringType(Register object,
1478  Register type,
1479  Label* not_string,
1480  Label* string) {
1481  Ldr(type, FieldMemOperand(object, HeapObject::kMapOffset));
1482  Ldrb(type.W(), FieldMemOperand(type, Map::kInstanceTypeOffset));
1483 
1484  STATIC_ASSERT(kStringTag == 0);
1485  DCHECK((string != NULL) || (not_string != NULL));
1486  if (string == NULL) {
1487  TestAndBranchIfAnySet(type.W(), kIsNotStringMask, not_string);
1488  } else if (not_string == NULL) {
1489  TestAndBranchIfAllClear(type.W(), kIsNotStringMask, string);
1490  } else {
1491  TestAndBranchIfAnySet(type.W(), kIsNotStringMask, not_string);
1492  B(string);
1493  }
1494 }
1495 
1496 
1497 void MacroAssembler::Push(Handle<Object> handle) {
1498  UseScratchRegisterScope temps(this);
1499  Register tmp = temps.AcquireX();
1500  Mov(tmp, Operand(handle));
1501  Push(tmp);
1502 }
1503 
1504 
1505 void MacroAssembler::Claim(uint64_t count, uint64_t unit_size) {
1506  uint64_t size = count * unit_size;
1507 
1508  if (size == 0) {
1509  return;
1510  }
1511 
1512  if (csp.Is(StackPointer())) {
1513  DCHECK(size % 16 == 0);
1514  } else {
1515  BumpSystemStackPointer(size);
1516  }
1517 
1518  Sub(StackPointer(), StackPointer(), size);
1519 }
1520 
1521 
1522 void MacroAssembler::Claim(const Register& count, uint64_t unit_size) {
1523  if (unit_size == 0) return;
1524  DCHECK(base::bits::IsPowerOfTwo64(unit_size));
1525 
1526  const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
1527  const Operand size(count, LSL, shift);
1528 
1529  if (size.IsZero()) {
1530  return;
1531  }
1532 
1533  if (!csp.Is(StackPointer())) {
1534  BumpSystemStackPointer(size);
1535  }
1536 
1537  Sub(StackPointer(), StackPointer(), size);
1538 }
1539 
1540 
1541 void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
1542  DCHECK(unit_size == 0 || base::bits::IsPowerOfTwo64(unit_size));
1543  const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
1544  const Operand size(count_smi,
1545  (shift >= 0) ? (LSL) : (LSR),
1546  (shift >= 0) ? (shift) : (-shift));
1547 
1548  if (size.IsZero()) {
1549  return;
1550  }
1551 
1552  if (!csp.Is(StackPointer())) {
1553  BumpSystemStackPointer(size);
1554  }
1555 
1556  Sub(StackPointer(), StackPointer(), size);
1557 }
1558 
1559 
1560 void MacroAssembler::Drop(uint64_t count, uint64_t unit_size) {
1561  uint64_t size = count * unit_size;
1562 
1563  if (size == 0) {
1564  return;
1565  }
1566 
1567  Add(StackPointer(), StackPointer(), size);
1568 
1569  if (csp.Is(StackPointer())) {
1570  DCHECK(size % 16 == 0);
1571  } else if (emit_debug_code()) {
1572  // It is safe to leave csp where it is when unwinding the JavaScript stack,
1573  // but if we keep it matching StackPointer, the simulator can detect memory
1574  // accesses in the now-free part of the stack.
1575  SyncSystemStackPointer();
1576  }
1577 }
1578 
1579 
1580 void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
1581  if (unit_size == 0) return;
1582  DCHECK(base::bits::IsPowerOfTwo64(unit_size));
1583 
1584  const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
1585  const Operand size(count, LSL, shift);
1586 
1587  if (size.IsZero()) {
1588  return;
1589  }
1590 
1591  Add(StackPointer(), StackPointer(), size);
1592 
1593  if (!csp.Is(StackPointer()) && emit_debug_code()) {
1594  // It is safe to leave csp where it is when unwinding the JavaScript stack,
1595  // but if we keep it matching StackPointer, the simulator can detect memory
1596  // accesses in the now-free part of the stack.
1597  SyncSystemStackPointer();
1598  }
1599 }
1600 
1601 
1602 void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
1603  DCHECK(unit_size == 0 || base::bits::IsPowerOfTwo64(unit_size));
1604  const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
1605  const Operand size(count_smi,
1606  (shift >= 0) ? (LSL) : (LSR),
1607  (shift >= 0) ? (shift) : (-shift));
1608 
1609  if (size.IsZero()) {
1610  return;
1611  }
1612 
1613  Add(StackPointer(), StackPointer(), size);
1614 
1615  if (!csp.Is(StackPointer()) && emit_debug_code()) {
1616  // It is safe to leave csp where it is when unwinding the JavaScript stack,
1617  // but if we keep it matching StackPointer, the simulator can detect memory
1618  // accesses in the now-free part of the stack.
1619  SyncSystemStackPointer();
1620  }
1621 }
1622 
1623 
1624 void MacroAssembler::CompareAndBranch(const Register& lhs,
1625  const Operand& rhs,
1626  Condition cond,
1627  Label* label) {
1628  if (rhs.IsImmediate() && (rhs.ImmediateValue() == 0) &&
1629  ((cond == eq) || (cond == ne))) {
1630  if (cond == eq) {
1631  Cbz(lhs, label);
1632  } else {
1633  Cbnz(lhs, label);
1634  }
1635  } else {
1636  Cmp(lhs, rhs);
1637  B(cond, label);
1638  }
1639 }
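// Usage sketch (illustrative only, label is hypothetical): comparisons
// against zero with eq/ne fold into one compare-and-branch instruction:
//   CompareAndBranch(x0, 0, eq, &done);    // emits cbz x0, done
//   CompareAndBranch(x0, 42, eq, &done);   // emits cmp x0, #42; b.eq done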
1640 
1641 
1642 void MacroAssembler::TestAndBranchIfAnySet(const Register& reg,
1643  const uint64_t bit_pattern,
1644  Label* label) {
1645  int bits = reg.SizeInBits();
1646  DCHECK(CountSetBits(bit_pattern, bits) > 0);
1647  if (CountSetBits(bit_pattern, bits) == 1) {
1648  Tbnz(reg, MaskToBit(bit_pattern), label);
1649  } else {
1650  Tst(reg, bit_pattern);
1651  B(ne, label);
1652  }
1653 }
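// Usage sketch (illustrative only, labels are hypothetical): a single-bit
// mask becomes one tbnz, while multi-bit masks fall back to tst + b.ne:
//   TestAndBranchIfAnySet(w0, 1 << 3, &bit3_set);   // tbnz w0, #3, bit3_set
//   TestAndBranchIfAnySet(w0, 0xc, &any_set);       // tst w0, #0xc; b.ne any_set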
1654 
1655 void MacroAssembler::TestAndBranchIfAllClear(const Register& reg,
1655 
1657  const uint64_t bit_pattern,
1658  Label* label) {
1659  int bits = reg.SizeInBits();
1660  DCHECK(CountSetBits(bit_pattern, bits) > 0);
1661  if (CountSetBits(bit_pattern, bits) == 1) {
1662  Tbz(reg, MaskToBit(bit_pattern), label);
1663  } else {
1664  Tst(reg, bit_pattern);
1665  B(eq, label);
1666  }
1667 }
1668 
1669 
1670 void MacroAssembler::InlineData(uint64_t data) {
1671  DCHECK(is_uint16(data));
1672  InstructionAccurateScope scope(this, 1);
1673  movz(xzr, data);
1674 }
1675 
1676 
1677 void MacroAssembler::EnableInstrumentation() {
1678  InstructionAccurateScope scope(this, 1);
1679  movn(xzr, InstrumentStateEnable);
1680 }
1681 
1682 
1683 void MacroAssembler::DisableInstrumentation() {
1684  InstructionAccurateScope scope(this, 1);
1685  movn(xzr, InstrumentStateDisable);
1686 }
1687 
1688 
1689 void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
1690  DCHECK(strlen(marker_name) == 2);
1691 
1692  // We allow only printable characters in the marker names. Unprintable
1693  // characters are reserved for controlling features of the instrumentation.
1694  DCHECK(isprint(marker_name[0]) && isprint(marker_name[1]));
1695 
1696  InstructionAccurateScope scope(this, 1);
1697  movn(xzr, (marker_name[1] << 8) | marker_name[0]);
1698 }
1699 
1700 } } // namespace v8::internal
1701 
1702 #endif // V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
An object reference managed by the v8 garbage collector.
Definition: v8.h:198
Isolate * isolate() const
Definition: assembler.h:62
bool emit_debug_code() const
Definition: assembler.h:65
void rev32(const Register &rd, const Register &rn)
void cset(const Register &rd, Condition cond)
void umaddl(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
void fcvtms(const Register &rd, const FPRegister &fn)
void fmov(FPRegister fd, double imm)
void uxtb(Register dst, const Operand &src, Condition cond=al)
void fcvtmu(const Register &rd, const FPRegister &fn)
void csetm(const Register &rd, Condition cond)
void rbit(const Register &rd, const Register &rn)
void frintn(const FPRegister &fd, const FPRegister &fn)
void movn(const Register &rd, uint64_t imm, int shift=-1)
void hint(SystemHint code)
void fnmsub(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, const FPRegister &fa)
void bfi(Register dst, Register src, int lsb, int width, Condition cond=al)
void fminnm(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void stnp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &dst)
void csinc(const Register &rd, const Register &rn, const Register &rm, Condition cond)
void smull(Register dstL, Register dstH, Register src1, Register src2, SBit s=LeaveCC, Condition cond=al)
void br(const Register &xn)
void fcmp(const FPRegister &fn, const FPRegister &fm)
void extr(const Register &rd, const Register &rn, const Register &rm, unsigned lsb)
void fcvtzu(const Register &rd, const FPRegister &fn)
void sxtw(const Register &rd, const Register &rn)
void fmax(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void mneg(const Register &rd, const Register &rn, const Register &rm)
void mul(Register dst, Register src1, Register src2, SBit s=LeaveCC, Condition cond=al)
void uxth(const Register &rd, const Register &rn)
void asrv(const Register &rd, const Register &rn, const Register &rm)
void ror(const Register &rd, const Register &rs, unsigned shift)
void fcvtnu(const Register &rd, const FPRegister &fn)
void madd(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
void sbfx(Register dst, Register src, int lsb, int width, Condition cond=al)
void sxth(const Register &rd, const Register &rn)
void msr(SRegisterFieldMask fields, const Operand &src, Condition cond=al)
void frinta(const FPRegister &fd, const FPRegister &fn)
void fdiv(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void fcvtzs(const Register &rd, const FPRegister &fn)
void lslv(const Register &rd, const Register &rn, const Register &rm)
void lsr(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
static bool IsImmAddSub(int64_t immediate)
void shift(Register dst, Immediate shift_amount, int subcode, int size)
void fadd(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void msub(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
void fmin(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void cneg(const Register &rd, const Register &rn, Condition cond)
void dsb(BarrierDomain domain, BarrierType type)
void fcvtns(const Register &rd, const FPRegister &fn)
void dmb(BarrierDomain domain, BarrierType type)
void fcsel(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, Condition cond)
void cls(const Register &rd, const Register &rn)
void rorv(const Register &rd, const Register &rn, const Register &rm)
void smaddl(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
void fmaxnm(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void clz(Register dst, Register src, Condition cond=al)
void scvtf(const FPRegister &fd, const Register &rn, unsigned fbits=0)
void sub(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void udiv(Register dst, Register src1, Register src2, Condition cond=al)
void smsubl(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
void bl(int branch_offset, Condition cond=al)
void fneg(const FPRegister &fd, const FPRegister &fn)
void frintz(const FPRegister &fd, const FPRegister &fn)
void lsl(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void cinc(const Register &rd, const Register &rn, Condition cond)
void smulh(const Register &rd, const Register &rn, const Register &rm)
void b(int branch_offset, Condition cond=al)
void fmul(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void mov(Register dst, const Operand &src, SBit s=LeaveCC, Condition cond=al)
void csel(const Register &rd, const Register &rn, const Register &rm, Condition cond)
void fnmadd(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, const FPRegister &fa)
void ubfx(Register dst, Register src, int lsb, int width, Condition cond=al)
void ldnp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &src)
void ucvtf(const FPRegister &fd, const Register &rn, unsigned fbits=0)
void mrs(Register dst, SRegister s, Condition cond=al)
void movz(const Register &rd, uint64_t imm, int shift=-1)
void rev16(const Register &rd, const Register &rn)
void bfxil(const Register &rd, const Register &rn, unsigned lsb, unsigned width)
void fcvt(const FPRegister &fd, const FPRegister &fn)
void sdiv(Register dst, Register src1, Register src2, Condition cond=al)
void sbfiz(const Register &rd, const Register &rn, unsigned lsb, unsigned width)
void movk(const Register &rd, uint64_t imm, int shift=-1)
void frintm(const FPRegister &fd, const FPRegister &fn)
void CheckVeneerPool(bool force_emit, bool require_jump, int margin=kVeneerDistanceMargin)
void cinv(const Register &rd, const Register &rn, Condition cond)
void fcvtau(const Register &rd, const FPRegister &fn)
void blr(const Register &xn)
void fsub(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void ldr(Register dst, const MemOperand &src, Condition cond=al)
void bic(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void fmadd(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, const FPRegister &fa)
void fcvtas(const Register &rd, const FPRegister &fn)
static bool IsImmFP64(double imm)
void fmsub(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, const FPRegister &fa)
void uxtw(const Register &rd, const Register &rn)
void fccmp(const FPRegister &fn, const FPRegister &fm, StatusFlags nzcv, Condition cond)
void sxtb(const Register &rd, const Register &rn)
void debug(const char *message, uint32_t code, Instr params=BREAK)
void ubfiz(const Register &rd, const Register &rn, unsigned lsb, unsigned width)
void rev(const Register &rd, const Register &rn)
void ret(const Register &xn=lr)
void umsubl(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
static bool IsImmFP32(float imm)
void asr(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
const Register & AppropriateZeroRegFor(const CPURegister &reg) const
void csinv(const Register &rd, const Register &rn, const Register &rm, Condition cond)
void csneg(const Register &rd, const Register &rn, const Register &rm, Condition cond)
void lsrv(const Register &rd, const Register &rn, const Register &rm)
static bool IsSupported(CpuFeature f)
Definition: assembler.h:184
static const int kMapOffset
Definition: objects.h:1427
void Fcvtzs(const Register &rd, const FPRegister &fn)
void Mul(const Register &rd, const Register &rn, const Register &rm)
void CmovX(const Register &rd, const Register &rn, Condition cond)
void Asr(const Register &rd, const Register &rn, unsigned shift)
void Msub(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
void Sxtb(const Register &rd, const Register &rn)
void Fsub(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void Stnp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &dst)
void AddSubWithCarryMacro(const Register &rd, const Register &rn, const Operand &operand, FlagsUpdate S, AddSubWithCarryOp op)
void Frintz(const FPRegister &fd, const FPRegister &fn)
void Fnmsub(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, const FPRegister &fa)
void Bic(const Register &rd, const Register &rn, const Operand &operand)
void AnnotateInstrumentation(const char *marker_name)
void AddSubMacro(const Register &rd, const Register &rn, const Operand &operand, FlagsUpdate S, AddSubOp op)
void Madd(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
void Fmaxnm(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void Drop(int count, Condition cond=al)
void Dsb(BarrierDomain domain, BarrierType type)
void Udiv(const Register &rd, const Register &rn, const Register &rm)
void Orr(const Register &rd, const Register &rn, const Operand &operand)
void Neg(const Register &rd, const Operand &operand)
void Adcs(const Register &rd, const Register &rn, const Operand &operand)
void IsObjectNameType(Register object, Register scratch, Label *fail)
void Add(const Register &rd, const Register &rn, const Operand &operand)
void Bics(const Register &rd, const Register &rn, const Operand &operand)
void Mneg(const Register &rd, const Register &rn, const Register &rm)
void Umaddl(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
void Adds(const Register &rd, const Register &rn, const Operand &operand)
void SmiUntag(Register reg, SBit s=LeaveCC)
void IsObjectJSStringType(Register object, Register scratch, Label *fail)
void Csneg(const Register &rd, const Register &rn, const Register &rm, Condition cond)
void Umsubl(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
void Fabs(const FPRegister &fd, const FPRegister &fn)
void Fmov(FPRegister fd, FPRegister fn)
void Ands(const Register &rd, const Register &rn, const Operand &operand)
void Uxth(const Register &rd, const Register &rn)
void Fdiv(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void Fcvtms(const Register &rd, const FPRegister &fn)
void CompareAndBranch(const Register &lhs, const Operand &rhs, Condition cond, Label *label)
void Ngcs(const Register &rd, const Operand &operand)
void CompareObjectType(Register heap_object, Register map, Register type_reg, InstanceType type)
void Msr(SystemRegister sysreg, const Register &rt)
void Lsr(const Register &rd, const Register &rn, unsigned shift)
void Tst(const Register &rn, const Operand &operand)
void Smsubl(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
void JumpIfBothNotSmi(Register value1, Register value2, Label *not_smi_label)
void Bfxil(const Register &rd, const Register &rn, unsigned lsb, unsigned width)
void mov(Register rd, Register rt)
void Sxth(const Register &rd, const Register &rn)
void BumpSystemStackPointer(const Operand &space)
void Fcvtmu(const Register &rd, const FPRegister &fn)
void Fmax(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void JumpIfSmi(Register value, Label *smi_label)
void Fsqrt(const FPRegister &fd, const FPRegister &fn)
void Smull(const Register &rd, const Register &rn, const Register &rm)
void Cmn(const Register &rn, const Operand &operand)
void Ldnp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &src)
void Sbc(const Register &rd, const Register &rn, const Operand &operand)
void Fnmadd(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, const FPRegister &fa)
void SmiUntagToDouble(FPRegister dst, Register src, UntagMode mode=kNotSpeculativeUntag)
void Ror(const Register &rd, const Register &rs, unsigned shift)
void Fcvtas(const Register &rd, const FPRegister &fn)
void TestAndBranchIfAllClear(const Register &reg, const uint64_t bit_pattern, Label *label)
void Smaddl(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
void B(Label *label, BranchType type, Register reg=NoReg, int bit=-1)
void Clz(const Register &rd, const Register &rn)
void Bfi(Register dst, Register src, Register scratch, int lsb, int width, Condition cond=al)
void Fmadd(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, const FPRegister &fa)
void Uxtw(const Register &rd, const Register &rn)
STATIC_ASSERT((reg_zero==(reg_not_zero ^ 1)) &&(reg_bit_clear==(reg_bit_set ^ 1)) &&(always==(never ^ 1)))
void Tbz(const Register &rt, unsigned bit_pos, Label *label)
void Dmb(BarrierDomain domain, BarrierType type)
void Ccmn(const Register &rn, const Operand &operand, StatusFlags nzcv, Condition cond)
void Fmul(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void Mrs(const Register &rt, SystemRegister sysreg)
void Cinc(const Register &rd, const Register &rn, Condition cond)
const Register & StackPointer() const
void Debug(const char *message, uint32_t code, Instr params=BREAK)
void Lsl(const Register &rd, const Register &rn, unsigned shift)
void Frintm(const FPRegister &fd, const FPRegister &fn)
void SmiTag(Register reg, SBit s=LeaveCC)
void Eor(const Register &rd, const Register &rn, const Operand &operand)
void Fcsel(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, Condition cond)
void IsInstanceJSObjectType(Register map, Register scratch, Label *fail)
void Fmin(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void Sbcs(const Register &rd, const Register &rn, const Operand &operand)
void JumpIfEitherNotSmi(Register value1, Register value2, Label *not_smi_label)
void JumpIfEitherSmi(Register reg1, Register reg2, Label *on_either_smi)
void Abort(BailoutReason msg)
void ClaimBySMI(const Register &count_smi, uint64_t unit_size=kXRegSize)
void Ubfiz(const Register &rd, const Register &rn, unsigned lsb, unsigned width)
void Uxtb(const Register &rd, const Register &rn)
void Fneg(const FPRegister &fd, const FPRegister &fn)
void Fcvtzu(const Register &rd, const FPRegister &fn)
void ObjectTag(Register tagged_obj, Register obj)
void Mov(const Register &rd, const Operand &operand, DiscardMoveMode discard_mode=kDontDiscardForSameWReg)
void Sbfiz(const Register &rd, const Register &rn, unsigned lsb, unsigned width)
void And(Register dst, Register src1, const Operand &src2, Condition cond=al)
void Csinc(const Register &rd, const Register &rn, const Register &rm, Condition cond)
void DropBySMI(const Register &count_smi, uint64_t unit_size=kXRegSize)
void Sdiv(const Register &rd, const Register &rn, const Register &rm)
void Fminnm(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void Negs(const Register &rd, const Operand &operand)
void Fccmp(const FPRegister &fn, const FPRegister &fm, StatusFlags nzcv, Condition cond)
void Fcvtns(const Register &rd, const FPRegister &fn)
void Fadd(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void Fcmp(const FPRegister &fn, const FPRegister &fm)
void Rbit(const Register &rd, const Register &rn)
void Fcvtau(const Register &rd, const FPRegister &fn)
void Orn(const Register &rd, const Register &rn, const Operand &operand)
void LogicalMacro(const Register &rd, const Register &rn, const Operand &operand, LogicalOp op)
void Frintn(const FPRegister &fd, const FPRegister &fn)
void Cls(const Register &rd, const Register &rn)
void Sbfx(Register dst, Register src, int lsb, int width, Condition cond=al)
void Tbnz(const Register &rt, unsigned bit_pos, Label *label)
void Subs(const Register &rd, const Register &rn, const Operand &operand)
void Rev(const Register &rd, const Register &rn)
void Cset(const Register &rd, Condition cond)
void JumpIfNotSmi(Register value, Label *not_smi_label)
void Smulh(const Register &rd, const Register &rn, const Register &rm)
void Frinta(const FPRegister &fd, const FPRegister &fn)
void Cneg(const Register &rd, const Register &rn, Condition cond)
void Ucvtf(const FPRegister &fd, const Register &rn, unsigned fbits=0)
void Fmsub(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, const FPRegister &fa)
void Ldr(const CPURegister &rt, const Immediate &imm)
void Ubfx(Register dst, Register src, int lsb, int width, Condition cond=al)
void Cinv(const Register &rd, const Register &rn, Condition cond)
void Cmp(const Register &rn, const Operand &operand)
void TestAndBranchIfAnySet(const Register &reg, const uint64_t bit_pattern, Label *label)
void IsObjectJSObjectType(Register heap_object, Register map, Register scratch, Label *fail)
void JumpIfBothSmi(Register value1, Register value2, Label *both_smi_label, Label *not_smi_label=NULL)
void Mvn(const Register &rd, uint64_t imm)
void AssertSmi(Register object)
void Fcvt(const FPRegister &fd, const FPRegister &fn)
void Ccmp(const Register &rn, const Operand &operand, StatusFlags nzcv, Condition cond)
void Adc(const Register &rd, const Register &rn, const Operand &operand)
void Sub(const Register &rd, const Register &rn, const Operand &operand)
void CzeroX(const Register &rd, Condition cond)
void Rev16(const Register &rd, const Register &rn)
void Fcvtnu(const Register &rd, const FPRegister &fn)
void ObjectUntag(Register untagged_obj, Register obj)
void ConditionalCompareMacro(const Register &rn, const Operand &operand, StatusFlags nzcv, Condition cond, ConditionalCompareOp op)
void Eon(const Register &rd, const Register &rn, const Operand &operand)
void Cbnz(const Register &rt, Label *label)
void Cbz(const Register &rt, Label *label)
void Csetm(const Register &rd, Condition cond)
void Csinv(const Register &rd, const Register &rn, const Register &rm, Condition cond)
void Sxtw(const Register &rd, const Register &rn)
void Rev32(const Register &rd, const Register &rn)
void Claim(uint64_t count, uint64_t unit_size=kXRegSize)
void SmiUntagToFloat(FPRegister dst, Register src, UntagMode mode=kNotSpeculativeUntag)
void Extr(const Register &rd, const Register &rn, const Register &rm, unsigned lsb)
void Movk(const Register &rd, uint64_t imm, int shift=-1)
void Ngc(const Register &rd, const Operand &operand)
void Scvtf(const FPRegister &fd, const Register &rn, unsigned fbits=0)
static const int kInstanceTypeOffset
Definition: objects.h:6229
int64_t ImmediateValue() const
Register AcquireSameSizeAs(const Register &reg)
#define DCHECK(condition)
Definition: logging.h:205
#define DEFINE_FUNCTION(FN, REGTYPE, REG, OP)
#define LSPAIR_MACRO_LIST(V)
#define LS_MACRO_LIST(V)
bool IsPowerOfTwo64(uint64_t value)
Definition: bits.h:83
@ ALWAYS_ALIGN_CSP
Definition: globals.h:632
const int kSmiShift
bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)
const unsigned kXRegSizeInBits
static uint32_t float_to_rawbits(float value)
Definition: utils-arm64.h:27
const int kSmiTagSize
Definition: v8.h:5743
int MaskToBit(uint64_t mask)
const unsigned kWRegSizeInBits
const uint32_t kStringTag
Definition: objects.h:544
@ LAST_NONCALLABLE_SPEC_OBJECT_TYPE
Definition: objects.h:785
@ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE
Definition: objects.h:784
@ LAST_NAME_TYPE
Definition: objects.h:755
Handle< T > handle(T *t, Isolate *isolate)
Definition: handles.h:146
MemOperand FieldMemOperand(Register object, int offset)
const bool FLAG_enable_slow_asserts
Definition: checks.h:31
static uint64_t double_to_rawbits(double value)
Definition: utils-arm64.h:34
const int kHeapObjectTag
Definition: v8.h:5737
const int kSmiValueSize
Definition: v8.h:5806
MemOperand UntagSmiFieldMemOperand(Register object, int offset)
static void RoundUp(Vector< char > buffer, int *length, int *decimal_point)
Definition: fixed-dtoa.cc:171
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
const int kBitsPerByte
Definition: globals.h:162
int CountSetBits(uint64_t value, int width)
const int kSmiTag
Definition: v8.h:5742
const uint32_t kIsNotStringMask
Definition: objects.h:543
int CountTrailingZeros(uint64_t value, int width)
MemOperand UntagSmiMemOperand(Register object, int offset)
Debugger support for the V8 JavaScript engine.
Definition: accessors.cc:20
bool Is(const CPURegister &other) const
bool is(Register reg) const
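The bit-manipulation helpers referenced above (IsPowerOfTwo64, CountTrailingZeros, CountSetBits, MaskToBit) follow the conventional definitions. The standalone sketch below restates their expected behaviour in plain C++ under that assumption; it is not the V8 implementation.

#include <cstdint>

// Sketch of the assumed semantics, written with simple loops for clarity.
bool IsPowerOfTwo64Sketch(uint64_t value) {
  return value != 0 && (value & (value - 1)) == 0;  // exactly one bit set
}

int CountTrailingZerosSketch(uint64_t value, int width) {
  int count = 0;
  while (count < width && ((value >> count) & 1) == 0) count++;
  return count;  // equals 'width' when no bit in range is set
}

int CountSetBitsSketch(uint64_t value, int width) {
  int count = 0;
  for (int i = 0; i < width; i++) count += (value >> i) & 1;
  return count;
}

int MaskToBitSketch(uint64_t mask) {
  // For a single-bit mask the bit index is its trailing-zero count, which is
  // the kind of conversion a test-and-branch macro needs to select a
  // tbz/tbnz bit position.
  return CountTrailingZerosSketch(mask, 64);
}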