debug-arm64.cc
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_ARM64

#include "src/codegen.h"
#include "src/debug.h"

namespace v8 {
namespace internal {


#define __ ACCESS_MASM(masm)

bool BreakLocationIterator::IsDebugBreakAtReturn() {
  return Debug::IsDebugBreakAtReturn(rinfo());
}


void BreakLocationIterator::SetDebugBreakAtReturn() {
  // Patch the code emitted by FullCodeGenerator::EmitReturnSequence, changing
  // the return from JS function sequence from
  //   mov sp, fp
  //   ldp fp, lr, [sp] #16
  //   ldr ip0, [pc, #(3 * kInstructionSize)]
  //   add sp, sp, ip0
  //   ret
  //   <number of parameters ...
  //    ... plus one (64 bits)>
  // to a call to the debug break return code.
  //   ldr ip0, [pc, #(3 * kInstructionSize)]
  //   blr ip0
  //   hlt kHltBadCode    @ code should not return, catch if it does.
  //   <debug break return code ...
  //    ... entry point address (64 bits)>

  // The patching code must not overflow the space occupied by the return
  // sequence.
  STATIC_ASSERT(Assembler::kJSRetSequenceInstructions >= 5);
  PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 5);
  byte* entry =
      debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry();

  // The first instruction of a patched return sequence must be a load literal
  // loading the address of the debug break return code.
  patcher.ldr_pcrel(ip0, (3 * kInstructionSize) >> kLoadLiteralScaleLog2);
  // TODO(all): check the following is correct.
  // The debug break return code will push a frame and call statically compiled
  // code. By using blr, even though control will not return after the branch,
  // this call site will be registered in the frame (lr being saved as the pc
  // of the next instruction to execute for this frame). The debugger can now
  // iterate over the frames to find the call to the debug break return code.
  patcher.blr(ip0);
  patcher.hlt(kHltBadCode);
  patcher.dc64(reinterpret_cast<int64_t>(entry));
}
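
// A quick sanity check of the literal offset used above, assuming the usual
// ARM64 encodings (kInstructionSize == 4 bytes, kLoadLiteralScaleLog2 == 2,
// i.e. load-literal offsets are encoded in words):
//
//   (3 * kInstructionSize) >> kLoadLiteralScaleLog2 == 12 >> 2 == 3
//
// so the ldr reads its literal three instruction slots ahead, exactly where
// dc64 places the Return_DebugBreak entry address:
//
//   pc + 0  : ldr ip0, [pc, #12]
//   pc + 4  : blr ip0
//   pc + 8  : hlt kHltBadCode
//   pc + 12 : <entry address, occupying two instruction slots>
//
// ldr + blr + hlt + the two-slot literal add up to the five instruction slots
// requested from the PatchingAssembler.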


void BreakLocationIterator::ClearDebugBreakAtReturn() {
  // Reset the code emitted by EmitReturnSequence to its original state.
  rinfo()->PatchCode(original_rinfo()->pc(),
                     Assembler::kJSRetSequenceInstructions);
}


bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
  DCHECK(RelocInfo::IsJSReturn(rinfo->rmode()));
  return rinfo->IsPatchedReturnSequence();
}


bool BreakLocationIterator::IsDebugBreakAtSlot() {
  DCHECK(IsDebugBreakSlot());
  // Check whether the debug break slot instructions have been patched.
  return rinfo()->IsPatchedDebugBreakSlotSequence();
}


void BreakLocationIterator::SetDebugBreakAtSlot() {
  // Patch the code emitted by DebugCodegen::GenerateSlots, changing the debug
  // break slot code from
  //   mov x0, x0    @ nop DEBUG_BREAK_NOP
  //   mov x0, x0    @ nop DEBUG_BREAK_NOP
  //   mov x0, x0    @ nop DEBUG_BREAK_NOP
  //   mov x0, x0    @ nop DEBUG_BREAK_NOP
  // to a call to the debug slot code.
  //   ldr ip0, [pc, #(2 * kInstructionSize)]
  //   blr ip0
  //   <debug break slot code ...
  //    ... entry point address (64 bits)>

  // TODO(all): consider adding a hlt instruction after the blr as we don't
  // expect control to return here. This implies increasing
  // kDebugBreakSlotInstructions to 5 instructions.

  // The patching code must not overflow the space occupied by the debug
  // break slot.
  STATIC_ASSERT(Assembler::kDebugBreakSlotInstructions >= 4);
  PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 4);
  byte* entry =
      debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry();

  // The first instruction of a patched debug break slot must be a load literal
  // loading the address of the debug break slot code.
  patcher.ldr_pcrel(ip0, (2 * kInstructionSize) >> kLoadLiteralScaleLog2);
  // TODO(all): check the following is correct.
  // The debug break slot code will push a frame and call statically compiled
  // code. By using blr, even though control will not return after the branch,
  // this call site will be registered in the frame (lr being saved as the pc
  // of the next instruction to execute for this frame). The debugger can now
  // iterate over the frames to find the call to the debug break slot code.
  patcher.blr(ip0);
  patcher.dc64(reinterpret_cast<int64_t>(entry));
}
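
// The same arithmetic (under the same assumptions as above) explains the slot
// patch size: ldr (1 slot) + blr (1 slot) + the 64-bit entry address written
// by dc64 (2 slots) fill exactly the four instruction slots handed to the
// PatchingAssembler, which is why the hlt suggested in the TODO above would
// require growing kDebugBreakSlotInstructions to 5.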


void BreakLocationIterator::ClearDebugBreakAtSlot() {
  DCHECK(IsDebugBreakSlot());
  rinfo()->PatchCode(original_rinfo()->pc(),
                     Assembler::kDebugBreakSlotInstructions);
}


static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
                                          RegList object_regs,
                                          RegList non_object_regs,
                                          Register scratch) {
  {
    FrameScope scope(masm, StackFrame::INTERNAL);

    // Load padding words on stack.
    __ Mov(scratch, Smi::FromInt(LiveEdit::kFramePaddingValue));
    __ PushMultipleTimes(scratch, LiveEdit::kFramePaddingInitialSize);
    __ Mov(scratch, Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
    __ Push(scratch);

    // Any live values (object_regs and non_object_regs) in caller-saved
    // registers (or lr) need to be stored on the stack so that their values
    // are safely preserved for a call into C code.
    //
    // Also:
    //  * object_regs may be modified during the C code by the garbage
    //    collector. Every object register must be a valid tagged pointer or
    //    SMI.
    //
    //  * non_object_regs will be converted to SMIs so that the garbage
    //    collector doesn't try to interpret them as pointers.
    //
    // TODO(jbramley): Why can't this handle callee-saved registers?
    DCHECK((~kCallerSaved.list() & object_regs) == 0);
    DCHECK((~kCallerSaved.list() & non_object_regs) == 0);
    DCHECK((object_regs & non_object_regs) == 0);
    DCHECK((scratch.Bit() & object_regs) == 0);
    DCHECK((scratch.Bit() & non_object_regs) == 0);
    DCHECK((masm->TmpList()->list() & (object_regs | non_object_regs)) == 0);
    STATIC_ASSERT(kSmiValueSize == 32);

    CPURegList non_object_list =
        CPURegList(CPURegister::kRegister, kXRegSizeInBits, non_object_regs);
    while (!non_object_list.IsEmpty()) {
      // Store each non-object register as two SMIs.
      Register reg = Register(non_object_list.PopLowestIndex());
      __ Lsr(scratch, reg, 32);
      __ SmiTagAndPush(scratch, reg);

      // Stack:
      //  jssp[12]: reg[63:32]
      //  jssp[8]: 0x00000000 (SMI tag & padding)
      //  jssp[4]: reg[31:0]
      //  jssp[0]: 0x00000000 (SMI tag & padding)
      STATIC_ASSERT(kSmiTag == 0);
      STATIC_ASSERT(static_cast<unsigned>(kSmiShift) == kWRegSizeInBits);
    }

    if (object_regs != 0) {
      __ PushXRegList(object_regs);
    }

#ifdef DEBUG
    __ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
    __ Mov(x0, 0);  // No arguments.
    __ Mov(x1, ExternalReference::debug_break(masm->isolate()));

    CEntryStub stub(masm->isolate(), 1);
    __ CallStub(&stub);

    // Restore the register values from the expression stack.
    if (object_regs != 0) {
      __ PopXRegList(object_regs);
    }

    non_object_list =
        CPURegList(CPURegister::kRegister, kXRegSizeInBits, non_object_regs);
    while (!non_object_list.IsEmpty()) {
      // Load each non-object register from two SMIs.
      // Stack:
      //  jssp[12]: reg[63:32]
      //  jssp[8]: 0x00000000 (SMI tag & padding)
      //  jssp[4]: reg[31:0]
      //  jssp[0]: 0x00000000 (SMI tag & padding)
      Register reg = Register(non_object_list.PopHighestIndex());
      __ Pop(scratch, reg);
      __ Bfxil(reg, scratch, 32, 32);
    }
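
    // Worked example of the split/rejoin (illustrative value only): storing
    // reg == 0x0123456789abcdef pushes the tagged halves 0x0123456700000000
    // and 0x89abcdef00000000, each half shifted into the upper word by the
    // 32-bit SMI tag. On the way back, assuming the usual lowest-address-first
    // Pop() ordering, scratch receives the low half's SMI and reg the high
    // half's, and Bfxil(reg, scratch, 32, 32) copies scratch[63:32] into
    // reg[31:0], reconstructing the original 64-bit value.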

    // Don't bother removing padding bytes pushed on the stack
    // as the frame is going to be restored right away.

    // Leave the internal frame.
  }

  // Now that the break point has been handled, resume normal execution by
  // jumping to the target address intended by the caller, which was
  // overwritten by the address of DebugBreakXXX.
  ExternalReference after_break_target =
      ExternalReference::debug_after_break_target_address(masm->isolate());
  __ Mov(scratch, after_break_target);
  __ Ldr(scratch, MemOperand(scratch));
  __ Br(scratch);
}


void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
  // Register state for CallICStub
  // ----------- S t a t e -------------
  //  -- x1 : function
  //  -- x3 : slot in feedback array
  // -----------------------------------
  Generate_DebugBreakCallHelper(masm, x1.Bit() | x3.Bit(), 0, x10);
}


void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
  // Calling convention for IC load (from ic-arm.cc).
  Register receiver = LoadDescriptor::ReceiverRegister();
  Register name = LoadDescriptor::NameRegister();
  Generate_DebugBreakCallHelper(masm, receiver.Bit() | name.Bit(), 0, x10);
}


void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
  // Calling convention for IC store (from ic-arm64.cc).
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  Register value = StoreDescriptor::ValueRegister();
  Generate_DebugBreakCallHelper(
      masm, receiver.Bit() | name.Bit() | value.Bit(), 0, x10);
}


void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
  // Calling convention for keyed IC load (from ic-arm.cc).
  GenerateLoadICDebugBreak(masm);
}


void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
  // Calling convention for IC keyed store call (from ic-arm64.cc).
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  Register value = StoreDescriptor::ValueRegister();
  Generate_DebugBreakCallHelper(
      masm, receiver.Bit() | name.Bit() | value.Bit(), 0, x10);
}


void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
  // Register state for CompareNil IC
  // ----------- S t a t e -------------
  //  -- x0 : value
  // -----------------------------------
  Generate_DebugBreakCallHelper(masm, x0.Bit(), 0, x10);
}


void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
  // In places other than IC call sites it is expected that x0 is TOS, which
  // is an object - this is not generally the case, so this should be used
  // with care.
  Generate_DebugBreakCallHelper(masm, x0.Bit(), 0, x10);
}


void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
  // Register state for CallFunctionStub (from code-stubs-arm64.cc).
  // ----------- S t a t e -------------
  //  -- x1 : function
  // -----------------------------------
  Generate_DebugBreakCallHelper(masm, x1.Bit(), 0, x10);
}


void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
  // Calling convention for CallConstructStub (from code-stubs-arm64.cc).
  // ----------- S t a t e -------------
  //  -- x0 : number of arguments (not smi)
  //  -- x1 : constructor function
  // -----------------------------------
  Generate_DebugBreakCallHelper(masm, x1.Bit(), x0.Bit(), x10);
}


void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
    MacroAssembler* masm) {
  // Calling convention for CallConstructStub (from code-stubs-arm64.cc).
  // ----------- S t a t e -------------
  //  -- x0 : number of arguments (not smi)
  //  -- x1 : constructor function
  //  -- x2 : feedback array
  //  -- x3 : feedback slot (smi)
  // -----------------------------------
  Generate_DebugBreakCallHelper(
      masm, x1.Bit() | x2.Bit() | x3.Bit(), x0.Bit(), x10);
}


void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
  // Generate enough nop's to make space for a call instruction. Avoid emitting
  // the constant pool in the debug break slot code.
  InstructionAccurateScope scope(masm, Assembler::kDebugBreakSlotInstructions);

  __ RecordDebugBreakSlot();
  for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
    __ nop(Assembler::DEBUG_BREAK_NOP);
  }
}


void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
  // In the places where a debug break slot is inserted no registers can
  // contain object pointers.
  Generate_DebugBreakCallHelper(masm, 0, 0, x10);
}


void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
  __ Ret();
}


void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
  ExternalReference restarter_frame_function_slot =
      ExternalReference::debug_restarter_frame_function_pointer_address(
          masm->isolate());
  UseScratchRegisterScope temps(masm);
  Register scratch = temps.AcquireX();

  __ Mov(scratch, restarter_frame_function_slot);
  __ Str(xzr, MemOperand(scratch));

  // We do not know our frame height, but set sp based on fp.
  __ Sub(masm->StackPointer(), fp, kPointerSize);
  __ AssertStackConsistency();

  __ Pop(x1, fp, lr);  // Function, Frame, Return address.

  // Load context from the function.
  __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));

  // Get function code.
  __ Ldr(scratch, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
  __ Ldr(scratch, FieldMemOperand(scratch, SharedFunctionInfo::kCodeOffset));
  __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);

  // Re-run JSFunction, x1 is function, cp is context.
  __ Br(scratch);
}
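
// Recap of the frame dropper above: clear the restarter slot, rewind sp to
// just below fp, pop the function, saved frame pointer and return address,
// reload cp and the code entry from the function, then branch so the
// JSFunction is re-run from its first instruction (x1 holds the function,
// cp its context).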


const bool LiveEdit::kFrameDropperSupported = true;

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM64