V8 Project
assembler-mips64-inl.h
Go to the documentation of this file.
1 
2 // Copyright (c) 1994-2006 Sun Microsystems Inc.
3 // All Rights Reserved.
4 //
5 // Redistribution and use in source and binary forms, with or without
6 // modification, are permitted provided that the following conditions are
7 // met:
8 //
9 // - Redistributions of source code must retain the above copyright notice,
10 // this list of conditions and the following disclaimer.
11 //
12 // - Redistribution in binary form must reproduce the above copyright
13 // notice, this list of conditions and the following disclaimer in the
14 // documentation and/or other materials provided with the distribution.
15 //
16 // - Neither the name of Sun Microsystems or the names of contributors may
17 // be used to endorse or promote products derived from this software without
18 // specific prior written permission.
19 //
20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
21 // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
24 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
25 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
26 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
27 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
28 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
29 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
30 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 
32 // The original source code covered by the above license above has been
33 // modified significantly by Google Inc.
34 // Copyright 2012 the V8 project authors. All rights reserved.
35 
36 
37 #ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_
38 #define V8_MIPS_ASSEMBLER_MIPS_INL_H_
39 
41 
42 #include "src/assembler.h"
43 #include "src/debug.h"
44 
45 
46 namespace v8 {
47 namespace internal {
48 
49 
51 
52 
53 // -----------------------------------------------------------------------------
54 // Operand and MemOperand.
55 
56 Operand::Operand(int64_t immediate, RelocInfo::Mode rmode) {
57  rm_ = no_reg;
58  imm64_ = immediate;
59  rmode_ = rmode;
60 }
61 
62 
63 Operand::Operand(const ExternalReference& f) {
64  rm_ = no_reg;
65  imm64_ = reinterpret_cast<int64_t>(f.address());
67 }
68 
69 
70 Operand::Operand(Smi* value) {
71  rm_ = no_reg;
72  imm64_ = reinterpret_cast<intptr_t>(value);
73  rmode_ = RelocInfo::NONE32;
74 }
75 
76 
77 Operand::Operand(Register rm) {
78  rm_ = rm;
79 }
80 
81 
82 bool Operand::is_reg() const {
83  return rm_.is_valid();
84 }
85 
86 
89 }
90 
91 
94 }
95 
96 
99 }
100 
101 
103  return NumAllocatableRegisters();
104 }
105 
106 
107 int FPURegister::ToAllocationIndex(FPURegister reg) {
108  DCHECK(reg.code() % 2 == 0);
109  DCHECK(reg.code() / 2 < kMaxNumAllocatableRegisters);
110  DCHECK(reg.is_valid());
111  DCHECK(!reg.is(kDoubleRegZero));
112  DCHECK(!reg.is(kLithiumScratchDouble));
113  return (reg.code() / 2);
114 }
115 
116 
117 // -----------------------------------------------------------------------------
118 // RelocInfo.
119 
120 void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
122  // Absolute code pointer inside code object moves with the code object.
123  byte* p = reinterpret_cast<byte*>(pc_);
124  int count = Assembler::RelocateInternalReference(p, delta);
125  CpuFeatures::FlushICache(p, count * sizeof(uint32_t));
126  }
127 }
128 
129 
130 Address RelocInfo::target_address() {
133 }
134 
135 
136 Address RelocInfo::target_address_address() {
139  rmode_ == EMBEDDED_OBJECT ||
141  // Read the address of the word containing the target_address in an
142  // instruction stream.
143  // The only architecture-independent user of this function is the serializer.
144  // The serializer uses it to find out how many raw bytes of instruction to
145  // output before the next target.
146  // For an instruction like LUI/ORI where the target bits are mixed into the
147  // instruction bits, the size of the target will be zero, indicating that the
148  // serializer should not step forward in memory after a target is resolved
149  // and written. In this case the target_address_address function should
150  // return the end of the instructions to be patched, allowing the
151  // deserializer to deserialize the instructions as raw bytes and put them in
152  // place, ready to be patched with the target. After jump optimization,
153  // that is the address of the instruction that follows J/JAL/JR/JALR
154  // instruction.
155  // return reinterpret_cast<Address>(
156  // pc_ + Assembler::kInstructionsFor32BitConstant * Assembler::kInstrSize);
157  return reinterpret_cast<Address>(
159 }
160 
161 
162 Address RelocInfo::constant_pool_entry_address() {
163  UNREACHABLE();
164  return NULL;
165 }
166 
167 
168 int RelocInfo::target_address_size() {
170 }
171 
172 
173 void RelocInfo::set_target_address(Address target,
174  WriteBarrierMode write_barrier_mode,
175  ICacheFlushMode icache_flush_mode) {
177  Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
178  if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
179  host() != NULL && IsCodeTarget(rmode_)) {
180  Object* target_code = Code::GetCodeFromTargetAddress(target);
181  host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
182  host(), this, HeapObject::cast(target_code));
183  }
184 }
185 
186 
188  return pc - kCallTargetAddressOffset;
189 }
190 
191 
194 }
195 
196 
197 Object* RelocInfo::target_object() {
199  return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
200 }
201 
202 
203 Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
205  return Handle<Object>(reinterpret_cast<Object**>(
207 }
208 
209 
210 void RelocInfo::set_target_object(Object* target,
211  WriteBarrierMode write_barrier_mode,
212  ICacheFlushMode icache_flush_mode) {
215  reinterpret_cast<Address>(target),
216  icache_flush_mode);
217  if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
218  host() != NULL &&
219  target->IsHeapObject()) {
220  host()->GetHeap()->incremental_marking()->RecordWrite(
221  host(), &Memory::Object_at(pc_), HeapObject::cast(target));
222  }
223 }
224 
225 
226 Address RelocInfo::target_reference() {
229 }
230 
231 
232 Address RelocInfo::target_runtime_entry(Assembler* origin) {
234  return target_address();
235 }
236 
237 
238 void RelocInfo::set_target_runtime_entry(Address target,
239  WriteBarrierMode write_barrier_mode,
240  ICacheFlushMode icache_flush_mode) {
242  if (target_address() != target)
243  set_target_address(target, write_barrier_mode, icache_flush_mode);
244 }
245 
246 
247 Handle<Cell> RelocInfo::target_cell_handle() {
249  Address address = Memory::Address_at(pc_);
250  return Handle<Cell>(reinterpret_cast<Cell**>(address));
251 }
252 
253 
254 Cell* RelocInfo::target_cell() {
257 }
258 
259 
260 void RelocInfo::set_target_cell(Cell* cell,
261  WriteBarrierMode write_barrier_mode,
262  ICacheFlushMode icache_flush_mode) {
264  Address address = cell->address() + Cell::kValueOffset;
265  Memory::Address_at(pc_) = address;
266  if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
267  // TODO(1550) We are passing NULL as a slot because cell can never be on
268  // evacuation candidate.
269  host()->GetHeap()->incremental_marking()->RecordWrite(
270  host(), NULL, cell);
271  }
272 }
273 
274 
276 
277 
278 Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
279  UNREACHABLE(); // This should never be reached on Arm.
280  return Handle<Object>();
281 }
282 
283 
284 Code* RelocInfo::code_age_stub() {
288 }
289 
290 
291 void RelocInfo::set_code_age_stub(Code* stub,
292  ICacheFlushMode icache_flush_mode) {
295  host_,
296  stub->instruction_start());
297 }
298 
299 
300 Address RelocInfo::call_address() {
301  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
302  (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
303  // The pc_ offset of 0 assumes mips patched return sequence per
304  // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
305  // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
307 }
308 
309 
310 void RelocInfo::set_call_address(Address target) {
311  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
312  (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
313  // The pc_ offset of 0 assumes mips patched return sequence per
314  // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
315  // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
317  if (host() != NULL) {
318  Object* target_code = Code::GetCodeFromTargetAddress(target);
319  host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
320  host(), this, HeapObject::cast(target_code));
321  }
322 }
323 
324 
325 Object* RelocInfo::call_object() {
326  return *call_object_address();
327 }
328 
329 
330 Object** RelocInfo::call_object_address() {
331  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
332  (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
333  return reinterpret_cast<Object**>(pc_ + 6 * Assembler::kInstrSize);
334 }
335 
336 
337 void RelocInfo::set_call_object(Object* target) {
338  *call_object_address() = target;
339 }
340 
341 
342 void RelocInfo::WipeOut() {
344  IsCodeTarget(rmode_) ||
348 }
349 
350 
351 bool RelocInfo::IsPatchedReturnSequence() {
352  Instr instr0 = Assembler::instr_at(pc_); // lui.
353  Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize); // ori.
354  Instr instr2 = Assembler::instr_at(pc_ + 2 * Assembler::kInstrSize); // dsll.
355  Instr instr3 = Assembler::instr_at(pc_ + 3 * Assembler::kInstrSize); // ori.
356  Instr instr4 = Assembler::instr_at(pc_ + 4 * Assembler::kInstrSize); // jalr.
357 
358  bool patched_return = ((instr0 & kOpcodeMask) == LUI &&
359  (instr1 & kOpcodeMask) == ORI &&
360  (instr2 & kFunctionFieldMask) == DSLL &&
361  (instr3 & kOpcodeMask) == ORI &&
362  (instr4 & kFunctionFieldMask) == JALR);
363  return patched_return;
364 }
365 
366 
367 bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
368  Instr current_instr = Assembler::instr_at(pc_);
369  return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
370 }
371 
372 
373 void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
376  visitor->VisitEmbeddedPointer(this);
377  } else if (RelocInfo::IsCodeTarget(mode)) {
378  visitor->VisitCodeTarget(this);
379  } else if (mode == RelocInfo::CELL) {
380  visitor->VisitCell(this);
381  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
382  visitor->VisitExternalReference(this);
383  } else if (RelocInfo::IsCodeAgeSequence(mode)) {
384  visitor->VisitCodeAgeSequence(this);
385  } else if (((RelocInfo::IsJSReturn(mode) &&
386  IsPatchedReturnSequence()) ||
388  IsPatchedDebugBreakSlotSequence())) &&
389  isolate->debug()->has_break_points()) {
390  visitor->VisitDebugTarget(this);
391  } else if (RelocInfo::IsRuntimeEntry(mode)) {
392  visitor->VisitRuntimeEntry(this);
393  }
394 }
395 
396 
397 template<typename StaticVisitor>
398 void RelocInfo::Visit(Heap* heap) {
401  StaticVisitor::VisitEmbeddedPointer(heap, this);
402  } else if (RelocInfo::IsCodeTarget(mode)) {
403  StaticVisitor::VisitCodeTarget(heap, this);
404  } else if (mode == RelocInfo::CELL) {
405  StaticVisitor::VisitCell(heap, this);
406  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
407  StaticVisitor::VisitExternalReference(this);
408  } else if (RelocInfo::IsCodeAgeSequence(mode)) {
409  StaticVisitor::VisitCodeAgeSequence(heap, this);
410  } else if (heap->isolate()->debug()->has_break_points() &&
412  IsPatchedReturnSequence()) ||
414  IsPatchedDebugBreakSlotSequence()))) {
415  StaticVisitor::VisitDebugTarget(heap, this);
416  } else if (RelocInfo::IsRuntimeEntry(mode)) {
417  StaticVisitor::VisitRuntimeEntry(this);
418  }
419 }
420 
421 
422 // -----------------------------------------------------------------------------
423 // Assembler.
424 
425 
426 void Assembler::CheckBuffer() {
427  if (buffer_space() <= kGap) {
428  GrowBuffer();
429  }
430 }
431 
432 
// NOTE(review): the signature line of this function was dropped by the
// documentation extraction. Judging by the next_buffer_check_ test it is
// the quick trampoline-pool check (Assembler::CheckTrampolinePoolQuick) and
// the dropped inner line is the full pool check — confirm against upstream.
434  if (pc_offset() >= next_buffer_check_) {
436  }
437 }
438 
439 
440 void Assembler::emit(Instr x) {
441  if (!is_buffer_growth_blocked()) {
442  CheckBuffer();
443  }
444  *reinterpret_cast<Instr*>(pc_) = x;
445  pc_ += kInstrSize;
447 }
448 
449 
450 void Assembler::emit(uint64_t x) {
451  if (!is_buffer_growth_blocked()) {
452  CheckBuffer();
453  }
454  *reinterpret_cast<uint64_t*>(pc_) = x;
455  pc_ += kInstrSize * 2;
457 }
458 
459 
460 } } // namespace v8::internal
461 
462 #endif // V8_MIPS_ASSEMBLER_MIPS_INL_H_
#define kDoubleRegZero
#define kLithiumScratchDouble
static const int kSpecialTargetSize
bool is_buffer_growth_blocked() const
static const int kInstrSize
Instruction * pc() const
static Address target_address_at(Address pc, ConstantPoolArray *constant_pool)
static bool IsNop(Instr instr, int type=NON_MARKING_NOP)
static const int kPatchDebugBreakSlotReturnOffset
static Address break_address_from_return_address(Address pc)
static const int kGap
static void set_target_address_at(Address pc, ConstantPoolArray *constant_pool, Address target, ICacheFlushMode icache_flush_mode=FLUSH_ICACHE_IF_NEEDED)
static Address target_address_from_return_address(Address pc)
static const int kCallTargetAddressOffset
static const int kInstructionsFor64BitConstant
static int RelocateInternalReference(byte *pc, intptr_t pc_delta)
static const int kValueOffset
Definition: objects.h:9446
static Cell * FromValueAddress(Address value)
Definition: objects.h:9431
static Code * GetCodeFromTargetAddress(Address address)
Definition: objects-inl.h:5018
byte * instruction_start()
Definition: objects-inl.h:6176
static void FlushICache(void *start, size_t size)
static bool IsSupported(CpuFeature f)
Definition: assembler.h:184
Heap * GetHeap() const
Definition: objects-inl.h:1379
IncrementalMarking * incremental_marking()
Definition: heap.h:1205
static Object *& Object_at(Address addr)
Definition: v8memory.h:60
static Address & Address_at(Address addr)
Definition: v8memory.h:56
Operand(Register reg, Shift shift=LSL, unsigned shift_amount=0)
Immediate immediate() const
static bool IsDebugBreakSlot(Mode mode)
Definition: assembler.h:436
static bool IsJSReturn(Mode mode)
Definition: assembler.h:412
static bool IsEmbeddedObject(Mode mode)
Definition: assembler.h:402
static bool IsRuntimeEntry(Mode mode)
Definition: assembler.h:405
static bool IsCodeTarget(Mode mode)
Definition: assembler.h:399
static bool IsExternalReference(Mode mode)
Definition: assembler.h:430
Code * host() const
Definition: assembler.h:463
Mode rmode() const
Definition: assembler.h:459
static bool IsInternalReference(Mode mode)
Definition: assembler.h:433
static bool IsCodeAgeSequence(Mode mode)
Definition: assembler.h:442
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_BOOL(enable_always_align_csp
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be NULL
#define UNREACHABLE()
Definition: logging.h:30
#define DCHECK(condition)
Definition: logging.h:205
@ UPDATE_WRITE_BARRIER
Definition: objects.h:235
kSerializedDataOffset Object
Definition: objects-inl.h:5322
const int kFunctionFieldMask
const Register pc
byte * Address
Definition: globals.h:101
const Register no_reg
const int kOpcodeMask
static const int kNoCodeAgeSequenceLength
Debugger support for the V8 JavaScript engine.
Definition: accessors.cc:20
static int NumAllocatableAliasedRegisters()
static const int kMaxNumAllocatableRegisters
static int ToAllocationIndex(FPURegister reg)
static const int kMaxNumRegisters
static int NumAllocatableRegisters()
static const int kMaxNumAllocatableRegisters
Definition: assembler-arm.h:96