V8 Project
codegen-mips64.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/v8.h"
6 
7 #if V8_TARGET_ARCH_MIPS64
8 
9 #include "src/codegen.h"
10 #include "src/macro-assembler.h"
11 #include "src/mips64/simulator-mips64.h"
12 
13 namespace v8 {
14 namespace internal {
15 
16 
17 #define __ masm.
18 
19 
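// When running on the simulator the generated machine code cannot be called
// directly, so CreateExpFunction() instead returns the wrapper below, which
// routes the call through Simulator::CallFP.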
20 #if defined(USE_SIMULATOR)
21 byte* fast_exp_mips_machine_code = NULL;
22 double fast_exp_simulator(double x) {
23  return Simulator::current(Isolate::Current())->CallFP(
24  fast_exp_mips_machine_code, x, 0);
25 }
26 #endif
27 
28 
29 UnaryMathFunction CreateExpFunction() {
30  if (!FLAG_fast_math) return &std::exp;
31  size_t actual_size;
32  byte* buffer =
33  static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
34  if (buffer == NULL) return &std::exp;
35  ExternalReference::InitializeMathExpData();
36 
37  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
38 
39  {
40  DoubleRegister input = f12;
41  DoubleRegister result = f0;
42  DoubleRegister double_scratch1 = f4;
43  DoubleRegister double_scratch2 = f6;
44  Register temp1 = a4;
45  Register temp2 = a5;
46  Register temp3 = a6;
47 
48  if (!IsMipsSoftFloatABI) {
49  // Input value is in f12 anyway, nothing to do.
50  } else {
51  __ Move(input, a0, a1);
52  }
53  __ Push(temp3, temp2, temp1);
54  MathExpGenerator::EmitMathExp(
55  &masm, input, result, double_scratch1, double_scratch2,
56  temp1, temp2, temp3);
57  __ Pop(temp3, temp2, temp1);
58  if (!IsMipsSoftFloatABI) {
59  // Result is already in f0, nothing to do.
60  } else {
61  __ Move(v0, v1, result);
62  }
63  __ Ret();
64  }
65 
66  CodeDesc desc;
67  masm.GetCode(&desc);
68  DCHECK(!RelocInfo::RequiresRelocation(desc));
69 
70  CpuFeatures::FlushICache(buffer, actual_size);
71  base::OS::ProtectCode(buffer, actual_size);
72 
73 #if !defined(USE_SIMULATOR)
74  return FUNCTION_CAST<UnaryMathFunction>(buffer);
75 #else
76  fast_exp_mips_machine_code = buffer;
77  return &fast_exp_simulator;
78 #endif
79 }
80 
81 
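// CreateMemCopyUint8Function() emits a hand-tuned byte-copy stub that uses
// aligned word copies plus prefetching; under the simulator the C++ stub
// passed in is simply returned.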
82 #if defined(V8_HOST_ARCH_MIPS)
83 MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
84 #if defined(USE_SIMULATOR)
85  return stub;
86 #else
87 
88  size_t actual_size;
89  byte* buffer =
90  static_cast<byte*>(base::OS::Allocate(3 * KB, &actual_size, true));
91  if (buffer == NULL) return stub;
92 
93  // This code assumes that cache lines are 32 bytes; if the cache line is
94  // larger, it will not work correctly.
95  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
96 
97  {
98  Label lastb, unaligned, aligned, chkw,
99  loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
100  leave, ua_chk16w, ua_loop16w, ua_skip_pref, ua_chkw,
101  ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;
102 
103  // The size of each prefetch.
104  uint32_t pref_chunk = 32;
105  // The maximum size of a prefetch; it must not be less than pref_chunk.
106  // If the real size of a prefetch is greater than max_pref_size and
107  // the kPrefHintPrepareForStore hint is used, the code will not work
108  // correctly.
109  uint32_t max_pref_size = 128;
110  DCHECK(pref_chunk < max_pref_size);
111 
112  // pref_limit is set based on the fact that we never use an offset
113  // greater than 5 on a store pref and that a single pref can
114  // never be larger than max_pref_size.
115  uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
116  int32_t pref_hint_load = kPrefHintLoadStreamed;
117  int32_t pref_hint_store = kPrefHintPrepareForStore;
118  uint32_t loadstore_chunk = 4;
119 
120  // The initial prefetches may fetch bytes that are before the buffer being
121  // copied. Start copies with an offset of 4 to avoid this situation when
122  // using kPrefHintPrepareForStore.
123  DCHECK(pref_hint_store != kPrefHintPrepareForStore ||
124  pref_chunk * 4 >= max_pref_size);
125  // If the size is less than 8, go to lastb. Regardless of size,
126  // copy the dst pointer to v0 for the return value.
127  __ slti(a6, a2, 2 * loadstore_chunk);
128  __ bne(a6, zero_reg, &lastb);
129  __ mov(v0, a0); // In delay slot.
130 
131  // If src and dst have different alignments, go to unaligned, if they
132  // have the same alignment (but are not actually aligned) do a partial
133  // load/store to make them aligned. If they are both already aligned
134  // we can start copying at aligned.
135  __ xor_(t8, a1, a0);
136  __ andi(t8, t8, loadstore_chunk - 1); // t8 is a0/a1 word-displacement.
137  __ bne(t8, zero_reg, &unaligned);
138  __ subu(a3, zero_reg, a0); // In delay slot.
139 
140  __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
141  __ beq(a3, zero_reg, &aligned); // Already aligned.
142  __ subu(a2, a2, a3); // In delay slot. a2 is the remaining bytes count.
143 
144  __ lwr(t8, MemOperand(a1));
145  __ addu(a1, a1, a3);
146  __ swr(t8, MemOperand(a0));
147  __ addu(a0, a0, a3);
148 
149  // Now dst/src are both aligned to (word) aligned addresses. Set a2 to
150  // count how many bytes we have to copy after all the 64 byte chunks are
151  // copied and a3 to the dst pointer after all the 64 byte chunks have been
152  // copied. We will loop, incrementing a0 and a1 until a0 equals a3.
153  __ bind(&aligned);
154  __ andi(t8, a2, 0x3f);
155  __ beq(a2, t8, &chkw); // Less than 64?
156  __ subu(a3, a2, t8); // In delay slot.
157  __ addu(a3, a0, a3); // Now a3 is the final dst after loop.
158 
159  // When in the loop we prefetch with the kPrefHintPrepareForStore hint;
160  // in this case a0+x should be past the "a4-32" address. This means:
161  // for x=128 the last "safe" a0 address is "a4-160". Alternatively, for
162  // x=64 the last "safe" a0 address is "a4-96". In the current version we
163  // will use "pref hint, 128(a0)", so "a4-160" is the limit.
164  if (pref_hint_store == kPrefHintPrepareForStore) {
165  __ addu(a4, a0, a2); // a4 is the "past the end" address.
166  __ Subu(t9, a4, pref_limit); // t9 is the "last safe pref" address.
167  }
168 
169  __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
170  __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
171  __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
172  __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
173 
174  if (pref_hint_store != kPrefHintPrepareForStore) {
175  __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
176  __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
177  __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
178  }
179  __ bind(&loop16w);
180  __ lw(a4, MemOperand(a1));
181 
182  if (pref_hint_store == kPrefHintPrepareForStore) {
183  __ sltu(v1, t9, a0); // If a0 > t9, don't use next prefetch.
184  __ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg));
185  }
186  __ lw(a5, MemOperand(a1, 1, loadstore_chunk)); // Maybe in delay slot.
187 
188  __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
189  __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
190 
191  __ bind(&skip_pref);
192  __ lw(a6, MemOperand(a1, 2, loadstore_chunk));
193  __ lw(a7, MemOperand(a1, 3, loadstore_chunk));
194  __ lw(t0, MemOperand(a1, 4, loadstore_chunk));
195  __ lw(t1, MemOperand(a1, 5, loadstore_chunk));
196  __ lw(t2, MemOperand(a1, 6, loadstore_chunk));
197  __ lw(t3, MemOperand(a1, 7, loadstore_chunk));
198  __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
199 
200  __ sw(a4, MemOperand(a0));
201  __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
202  __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
203  __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
204  __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
205  __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
206  __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
207  __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
208 
209  __ lw(a4, MemOperand(a1, 8, loadstore_chunk));
210  __ lw(a5, MemOperand(a1, 9, loadstore_chunk));
211  __ lw(a6, MemOperand(a1, 10, loadstore_chunk));
212  __ lw(a7, MemOperand(a1, 11, loadstore_chunk));
213  __ lw(t0, MemOperand(a1, 12, loadstore_chunk));
214  __ lw(t1, MemOperand(a1, 13, loadstore_chunk));
215  __ lw(t2, MemOperand(a1, 14, loadstore_chunk));
216  __ lw(t3, MemOperand(a1, 15, loadstore_chunk));
217  __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
218 
219  __ sw(a4, MemOperand(a0, 8, loadstore_chunk));
220  __ sw(a5, MemOperand(a0, 9, loadstore_chunk));
221  __ sw(a6, MemOperand(a0, 10, loadstore_chunk));
222  __ sw(a7, MemOperand(a0, 11, loadstore_chunk));
223  __ sw(t0, MemOperand(a0, 12, loadstore_chunk));
224  __ sw(t1, MemOperand(a0, 13, loadstore_chunk));
225  __ sw(t2, MemOperand(a0, 14, loadstore_chunk));
226  __ sw(t3, MemOperand(a0, 15, loadstore_chunk));
227  __ addiu(a0, a0, 16 * loadstore_chunk);
228  __ bne(a0, a3, &loop16w);
229  __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
230  __ mov(a2, t8);
231 
232  // Here we have src and dst word-aligned but less than 64 bytes to go.
233  // Check for a 32 bytes chunk and copy if there is one. Otherwise jump
234  // down to chk1w to handle the tail end of the copy.
235  __ bind(&chkw);
236  __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
237  __ andi(t8, a2, 0x1f);
238  __ beq(a2, t8, &chk1w); // Less than 32?
239  __ nop(); // In delay slot.
240  __ lw(a4, MemOperand(a1));
241  __ lw(a5, MemOperand(a1, 1, loadstore_chunk));
242  __ lw(a6, MemOperand(a1, 2, loadstore_chunk));
243  __ lw(a7, MemOperand(a1, 3, loadstore_chunk));
244  __ lw(t0, MemOperand(a1, 4, loadstore_chunk));
245  __ lw(t1, MemOperand(a1, 5, loadstore_chunk));
246  __ lw(t2, MemOperand(a1, 6, loadstore_chunk));
247  __ lw(t3, MemOperand(a1, 7, loadstore_chunk));
248  __ addiu(a1, a1, 8 * loadstore_chunk);
249  __ sw(a4, MemOperand(a0));
250  __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
251  __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
252  __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
253  __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
254  __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
255  __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
256  __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
257  __ addiu(a0, a0, 8 * loadstore_chunk);
258 
259  // Here we have less than 32 bytes to copy. Set up for a loop to copy
260  // one word at a time. Set a2 to count how many bytes we have to copy
261  // after all the word chunks are copied and a3 to the dst pointer after
262  // all the word chunks have been copied. We will loop, incrementing a0
263  // and a1 until a0 equals a3.
264  __ bind(&chk1w);
265  __ andi(a2, t8, loadstore_chunk - 1);
266  __ beq(a2, t8, &lastb);
267  __ subu(a3, t8, a2); // In delay slot.
268  __ addu(a3, a0, a3);
269 
270  __ bind(&wordCopy_loop);
271  __ lw(a7, MemOperand(a1));
272  __ addiu(a0, a0, loadstore_chunk);
273  __ addiu(a1, a1, loadstore_chunk);
274  __ bne(a0, a3, &wordCopy_loop);
275  __ sw(a7, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
276 
277  __ bind(&lastb);
278  __ Branch(&leave, le, a2, Operand(zero_reg));
279  __ addu(a3, a0, a2);
280 
281  __ bind(&lastbloop);
282  __ lb(v1, MemOperand(a1));
283  __ addiu(a0, a0, 1);
284  __ addiu(a1, a1, 1);
285  __ bne(a0, a3, &lastbloop);
286  __ sb(v1, MemOperand(a0, -1)); // In delay slot.
287 
288  __ bind(&leave);
289  __ jr(ra);
290  __ nop();
291 
292  // Unaligned case. Only the dst gets aligned so we need to do partial
293  // loads of the source followed by normal stores to the dst (once we
294  // have aligned the destination).
295  __ bind(&unaligned);
296  __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
297  __ beq(a3, zero_reg, &ua_chk16w);
298  __ subu(a2, a2, a3); // In delay slot.
299 
300  __ lwr(v1, MemOperand(a1));
301  __ lwl(v1,
302  MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
303  __ addu(a1, a1, a3);
304  __ swr(v1, MemOperand(a0));
305  __ addu(a0, a0, a3);
306 
307  // Now the dst (but not the source) is aligned. Set a2 to count how many
308  // bytes we have to copy after all the 64 byte chunks are copied and a3 to
309  // the dst pointer after all the 64 byte chunks have been copied. We will
310  // loop, incrementing a0 and a1 until a0 equals a3.
311  __ bind(&ua_chk16w);
312  __ andi(t8, a2, 0x3f);
313  __ beq(a2, t8, &ua_chkw);
314  __ subu(a3, a2, t8); // In delay slot.
315  __ addu(a3, a0, a3);
316 
317  if (pref_hint_store == kPrefHintPrepareForStore) {
318  __ addu(a4, a0, a2);
319  __ Subu(t9, a4, pref_limit);
320  }
321 
322  __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
323  __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
324  __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
325 
326  if (pref_hint_store != kPrefHintPrepareForStore) {
327  __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
328  __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
329  __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
330  }
331 
332  __ bind(&ua_loop16w);
333  __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
334  __ lwr(a4, MemOperand(a1));
335  __ lwr(a5, MemOperand(a1, 1, loadstore_chunk));
336  __ lwr(a6, MemOperand(a1, 2, loadstore_chunk));
337 
338  if (pref_hint_store == kPrefHintPrepareForStore) {
339  __ sltu(v1, t9, a0);
340  __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
341  }
342  __ lwr(a7, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
343 
344  __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
345  __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
346 
347  __ bind(&ua_skip_pref);
348  __ lwr(t0, MemOperand(a1, 4, loadstore_chunk));
349  __ lwr(t1, MemOperand(a1, 5, loadstore_chunk));
350  __ lwr(t2, MemOperand(a1, 6, loadstore_chunk));
351  __ lwr(t3, MemOperand(a1, 7, loadstore_chunk));
352  __ lwl(a4,
353  MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
354  __ lwl(a5,
355  MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
356  __ lwl(a6,
357  MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
358  __ lwl(a7,
359  MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
360  __ lwl(t0,
361  MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
362  __ lwl(t1,
363  MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
364  __ lwl(t2,
365  MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
366  __ lwl(t3,
367  MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
368  __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
369  __ sw(a4, MemOperand(a0));
370  __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
371  __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
372  __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
373  __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
374  __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
375  __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
376  __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
377  __ lwr(a4, MemOperand(a1, 8, loadstore_chunk));
378  __ lwr(a5, MemOperand(a1, 9, loadstore_chunk));
379  __ lwr(a6, MemOperand(a1, 10, loadstore_chunk));
380  __ lwr(a7, MemOperand(a1, 11, loadstore_chunk));
381  __ lwr(t0, MemOperand(a1, 12, loadstore_chunk));
382  __ lwr(t1, MemOperand(a1, 13, loadstore_chunk));
383  __ lwr(t2, MemOperand(a1, 14, loadstore_chunk));
384  __ lwr(t3, MemOperand(a1, 15, loadstore_chunk));
385  __ lwl(a4,
386  MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
387  __ lwl(a5,
388  MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
389  __ lwl(a6,
390  MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
391  __ lwl(a7,
392  MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
393  __ lwl(t0,
394  MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
395  __ lwl(t1,
396  MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
397  __ lwl(t2,
398  MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
399  __ lwl(t3,
400  MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
401  __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
402  __ sw(a4, MemOperand(a0, 8, loadstore_chunk));
403  __ sw(a5, MemOperand(a0, 9, loadstore_chunk));
404  __ sw(a6, MemOperand(a0, 10, loadstore_chunk));
405  __ sw(a7, MemOperand(a0, 11, loadstore_chunk));
406  __ sw(t0, MemOperand(a0, 12, loadstore_chunk));
407  __ sw(t1, MemOperand(a0, 13, loadstore_chunk));
408  __ sw(t2, MemOperand(a0, 14, loadstore_chunk));
409  __ sw(t3, MemOperand(a0, 15, loadstore_chunk));
410  __ addiu(a0, a0, 16 * loadstore_chunk);
411  __ bne(a0, a3, &ua_loop16w);
412  __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
413  __ mov(a2, t8);
414 
415  // Here we have less than 64 bytes to go. Check for
416  // a 32 byte chunk and copy if there is one. Otherwise jump down to
417  // ua_chk1w to handle the tail end of the copy.
418  __ bind(&ua_chkw);
419  __ Pref(pref_hint_load, MemOperand(a1));
420  __ andi(t8, a2, 0x1f);
421 
422  __ beq(a2, t8, &ua_chk1w);
423  __ nop(); // In delay slot.
424  __ lwr(a4, MemOperand(a1));
425  __ lwr(a5, MemOperand(a1, 1, loadstore_chunk));
426  __ lwr(a6, MemOperand(a1, 2, loadstore_chunk));
427  __ lwr(a7, MemOperand(a1, 3, loadstore_chunk));
428  __ lwr(t0, MemOperand(a1, 4, loadstore_chunk));
429  __ lwr(t1, MemOperand(a1, 5, loadstore_chunk));
430  __ lwr(t2, MemOperand(a1, 6, loadstore_chunk));
431  __ lwr(t3, MemOperand(a1, 7, loadstore_chunk));
432  __ lwl(a4,
433  MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
434  __ lwl(a5,
435  MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
436  __ lwl(a6,
437  MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
438  __ lwl(a7,
439  MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
440  __ lwl(t0,
441  MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
442  __ lwl(t1,
443  MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
444  __ lwl(t2,
445  MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
446  __ lwl(t3,
447  MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
448  __ addiu(a1, a1, 8 * loadstore_chunk);
449  __ sw(a4, MemOperand(a0));
450  __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
451  __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
452  __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
453  __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
454  __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
455  __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
456  __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
457  __ addiu(a0, a0, 8 * loadstore_chunk);
458 
459  // Less than 32 bytes to copy. Set up for a loop to
460  // copy one word at a time.
461  __ bind(&ua_chk1w);
462  __ andi(a2, t8, loadstore_chunk - 1);
463  __ beq(a2, t8, &ua_smallCopy);
464  __ subu(a3, t8, a2); // In delay slot.
465  __ addu(a3, a0, a3);
466 
467  __ bind(&ua_wordCopy_loop);
468  __ lwr(v1, MemOperand(a1));
469  __ lwl(v1,
470  MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
471  __ addiu(a0, a0, loadstore_chunk);
472  __ addiu(a1, a1, loadstore_chunk);
473  __ bne(a0, a3, &ua_wordCopy_loop);
474  __ sw(v1, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
475 
476  // Copy the last 8 bytes.
477  __ bind(&ua_smallCopy);
478  __ beq(a2, zero_reg, &leave);
479  __ addu(a3, a0, a2); // In delay slot.
480 
481  __ bind(&ua_smallCopy_loop);
482  __ lb(v1, MemOperand(a1));
483  __ addiu(a0, a0, 1);
484  __ addiu(a1, a1, 1);
485  __ bne(a0, a3, &ua_smallCopy_loop);
486  __ sb(v1, MemOperand(a0, -1)); // In delay slot.
487 
488  __ jr(ra);
489  __ nop();
490  }
491  CodeDesc desc;
492  masm.GetCode(&desc);
493  DCHECK(!RelocInfo::RequiresRelocation(desc));
494 
495  CpuFeatures::FlushICache(buffer, actual_size);
496  base::OS::ProtectCode(buffer, actual_size);
497  return FUNCTION_CAST<MemCopyUint8Function>(buffer);
498 #endif
499 }
500 #endif
501 
502 UnaryMathFunction CreateSqrtFunction() {
503 #if defined(USE_SIMULATOR)
504  return &std::sqrt;
505 #else
506  size_t actual_size;
507  byte* buffer =
508  static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
509  if (buffer == NULL) return &std::sqrt;
510 
511  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
512 
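 // The generated stub simply computes sqrt of the incoming double FP argument
 // and returns it via the FP result register.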
513  __ MovFromFloatParameter(f12);
514  __ sqrt_d(f0, f12);
515  __ MovToFloatResult(f0);
516  __ Ret();
517 
518  CodeDesc desc;
519  masm.GetCode(&desc);
520  DCHECK(!RelocInfo::RequiresRelocation(desc));
521 
522  CpuFeatures::FlushICache(buffer, actual_size);
523  base::OS::ProtectCode(buffer, actual_size);
524  return FUNCTION_CAST<UnaryMathFunction>(buffer);
525 #endif
526 }
527 
528 #undef __
529 
530 
531 // -------------------------------------------------------------------------
532 // Platform-specific RuntimeCallHelper functions.
533 
534 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
535  masm->EnterFrame(StackFrame::INTERNAL);
536  DCHECK(!masm->has_frame());
537  masm->set_has_frame(true);
538 }
539 
540 
541 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
542  masm->LeaveFrame(StackFrame::INTERNAL);
543  DCHECK(masm->has_frame());
544  masm->set_has_frame(false);
545 }
546 
547 
548 // -------------------------------------------------------------------------
549 // Code generators
550 
551 #define __ ACCESS_MASM(masm)
552 
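// Performs an elements-kind transition that only needs a map change: stores
// |target_map| into the receiver's map slot with the appropriate write
// barrier; jumps to |allocation_memento_found| if a memento is found (with
// TRACK_ALLOCATION_SITE).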
553 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
554  MacroAssembler* masm,
555  Register receiver,
556  Register key,
557  Register value,
558  Register target_map,
559  AllocationSiteMode mode,
560  Label* allocation_memento_found) {
561  Register scratch_elements = a4;
562  DCHECK(!AreAliased(receiver, key, value, target_map,
563  scratch_elements));
564 
565  if (mode == TRACK_ALLOCATION_SITE) {
566  __ JumpIfJSArrayHasAllocationMemento(
567  receiver, scratch_elements, allocation_memento_found);
568  }
569 
570  // Set transitioned map.
571  __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
572  __ RecordWriteField(receiver,
573  HeapObject::kMapOffset,
574  target_map,
575  t1,
576  kRAHasNotBeenSaved,
577  kDontSaveFPRegs,
578  EMIT_REMEMBERED_SET,
579  OMIT_SMI_CHECK);
580 }
581 
582 
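// Converts a FixedArray of smis into a FixedDoubleArray backing store and
// transitions the receiver's map; bails out to |fail| if allocation needs a
// GC or an allocation memento is found (with TRACK_ALLOCATION_SITE).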
583 void ElementsTransitionGenerator::GenerateSmiToDouble(
584  MacroAssembler* masm,
585  Register receiver,
586  Register key,
587  Register value,
588  Register target_map,
589  AllocationSiteMode mode,
590  Label* fail) {
591  // Register ra contains the return address.
592  Label loop, entry, convert_hole, gc_required, only_change_map, done;
593  Register elements = a4;
594  Register length = a5;
595  Register array = a6;
596  Register array_end = array;
597 
598  // target_map parameter can be clobbered.
599  Register scratch1 = target_map;
600  Register scratch2 = t1;
601  Register scratch3 = a7;
602 
603  // Verify input registers don't conflict with locals.
604  DCHECK(!AreAliased(receiver, key, value, target_map,
605  elements, length, array, scratch2));
606 
607  Register scratch = t2;
608  if (mode == TRACK_ALLOCATION_SITE) {
609  __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
610  }
611 
612  // Check for empty arrays, which only require a map transition and no changes
613  // to the backing store.
614  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
615  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
616  __ Branch(&only_change_map, eq, at, Operand(elements));
617 
618  __ push(ra);
619  __ ld(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
620  // elements: source FixedArray
621  // length: number of elements (smi-tagged)
622 
623  // Allocate new FixedDoubleArray.
624  __ SmiScale(scratch, length, kDoubleSizeLog2);
625  __ Daddu(scratch, scratch, FixedDoubleArray::kHeaderSize);
626  __ Allocate(scratch, array, t3, scratch2, &gc_required, DOUBLE_ALIGNMENT);
627  // array: destination FixedDoubleArray, not tagged as heap object
628 
629  // Set destination FixedDoubleArray's length and map.
630  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
631  __ sd(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
632  // Update receiver's map.
633  __ sd(scratch2, MemOperand(array, HeapObject::kMapOffset));
634 
635  __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
636  __ RecordWriteField(receiver,
637  HeapObject::kMapOffset,
638  target_map,
639  scratch2,
640  kRAHasBeenSaved,
641  kDontSaveFPRegs,
642  OMIT_REMEMBERED_SET,
643  OMIT_SMI_CHECK);
644  // Replace receiver's backing store with newly created FixedDoubleArray.
645  __ Daddu(scratch1, array, Operand(kHeapObjectTag));
646  __ sd(scratch1, FieldMemOperand(a2, JSObject::kElementsOffset));
647  __ RecordWriteField(receiver,
648  JSObject::kElementsOffset,
649  scratch1,
650  scratch2,
651  kRAHasBeenSaved,
652  kDontSaveFPRegs,
653  EMIT_REMEMBERED_SET,
654  OMIT_SMI_CHECK);
655 
656 
657  // Prepare for conversion loop.
658  __ Daddu(scratch1, elements,
659  Operand(FixedArray::kHeaderSize - kHeapObjectTag));
660  __ Daddu(scratch3, array, Operand(FixedDoubleArray::kHeaderSize));
661  __ SmiScale(array_end, length, kDoubleSizeLog2);
662  __ Daddu(array_end, array_end, scratch3);
663 
664  // Repurpose registers no longer in use.
665  Register hole_lower = elements;
666  Register hole_upper = length;
667  __ li(hole_lower, Operand(kHoleNanLower32));
668  // scratch1: begin of source FixedArray element fields, not tagged
669  // hole_lower: kHoleNanLower32
670  // hole_upper: kHoleNanUpper32
671  // array_end: end of destination FixedDoubleArray, not tagged
672  // scratch3: begin of FixedDoubleArray element fields, not tagged
673  __ Branch(USE_DELAY_SLOT, &entry);
674  __ li(hole_upper, Operand(kHoleNanUpper32)); // In delay slot.
675 
676  __ bind(&only_change_map);
677  __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
678  __ RecordWriteField(receiver,
679  HeapObject::kMapOffset,
680  target_map,
681  scratch2,
682  kRAHasNotBeenSaved,
683  kDontSaveFPRegs,
684  OMIT_REMEMBERED_SET,
685  OMIT_SMI_CHECK);
686  __ Branch(&done);
687 
688  // Call into runtime if GC is required.
689  __ bind(&gc_required);
690  __ ld(ra, MemOperand(sp, 0));
691  __ Branch(USE_DELAY_SLOT, fail);
692  __ daddiu(sp, sp, kPointerSize); // In delay slot.
693 
694  // Convert and copy elements.
695  __ bind(&loop);
696  __ ld(scratch2, MemOperand(scratch1));
697  __ Daddu(scratch1, scratch1, kIntSize);
698  // scratch2: current element
699  __ JumpIfNotSmi(scratch2, &convert_hole);
700  __ SmiUntag(scratch2);
701 
702  // Normal smi, convert to double and store.
703  __ mtc1(scratch2, f0);
704  __ cvt_d_w(f0, f0);
705  __ sdc1(f0, MemOperand(scratch3));
706  __ Branch(USE_DELAY_SLOT, &entry);
707  __ daddiu(scratch3, scratch3, kDoubleSize); // In delay slot.
708 
709  // Hole found, store the-hole NaN.
710  __ bind(&convert_hole);
711  if (FLAG_debug_code) {
712  // Restore a "smi-untagged" heap object.
713  __ Or(scratch2, scratch2, Operand(1));
714  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
715  __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(scratch2));
716  }
717  // mantissa
718  __ sw(hole_lower, MemOperand(scratch3));
719  // exponent
720  __ sw(hole_upper, MemOperand(scratch3, kIntSize));
721  __ Daddu(scratch3, scratch3, kDoubleSize);
722 
723  __ bind(&entry);
724  __ Branch(&loop, lt, scratch3, Operand(array_end));
725 
726  __ bind(&done);
727  __ pop(ra);
728 }
729 
730 
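// Converts a FixedDoubleArray backing store into a FixedArray of heap numbers
// (and the-hole values) and transitions the receiver's map; bails out to
// |fail| on allocation failure or when an allocation memento is found.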
731 void ElementsTransitionGenerator::GenerateDoubleToObject(
732  MacroAssembler* masm,
733  Register receiver,
734  Register key,
735  Register value,
736  Register target_map,
737  AllocationSiteMode mode,
738  Label* fail) {
739  // Register ra contains the return address.
740  Label entry, loop, convert_hole, gc_required, only_change_map;
741  Register elements = a4;
742  Register array = a6;
743  Register length = a5;
744  Register scratch = t1;
745 
746  // Verify input registers don't conflict with locals.
747  DCHECK(!AreAliased(receiver, key, value, target_map,
748  elements, array, length, scratch));
749  if (mode == TRACK_ALLOCATION_SITE) {
750  __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
751  }
752 
753  // Check for empty arrays, which only require a map transition and no changes
754  // to the backing store.
755  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
756  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
757  __ Branch(&only_change_map, eq, at, Operand(elements));
758 
759  __ MultiPush(
760  value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());
761 
762  __ ld(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
763  // elements: source FixedArray
764  // length: number of elements (smi-tagged)
765 
766  // Allocate new FixedArray.
767  // Re-use value and target_map registers, as they have been saved on the
768  // stack.
769  Register array_size = value;
770  Register allocate_scratch = target_map;
771  __ SmiScale(array_size, length, kPointerSizeLog2);
772  __ Daddu(array_size, array_size, FixedDoubleArray::kHeaderSize);
773  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
774  NO_ALLOCATION_FLAGS);
775  // array: destination FixedArray, not tagged as heap object
776  // Set destination FixedDoubleArray's length and map.
777  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
778  __ sd(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
779  __ sd(scratch, MemOperand(array, HeapObject::kMapOffset));
780 
781  // Prepare for conversion loop.
782  Register src_elements = elements;
783  Register dst_elements = target_map;
784  Register dst_end = length;
785  Register heap_number_map = scratch;
786  __ Daddu(src_elements, src_elements,
787  Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + Register::kExponentOffset));
788  __ Daddu(dst_elements, array, Operand(FixedArray::kHeaderSize));
789  __ Daddu(array, array, Operand(kHeapObjectTag));
790  __ SmiScale(dst_end, dst_end, kPointerSizeLog2);
791  __ Daddu(dst_end, dst_elements, dst_end);
792  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
793  // Using offsetted addresses.
794  // dst_elements: begin of destination FixedArray element fields, not tagged
795  // src_elements: begin of source FixedDoubleArray element fields, not tagged,
796  // points to the exponent
797  // dst_end: end of destination FixedArray, not tagged
798  // array: destination FixedArray
799  // heap_number_map: heap number map
800  __ Branch(&entry);
801 
802  // Call into runtime if GC is required.
803  __ bind(&gc_required);
804  __ MultiPop(
805  value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());
806 
807  __ Branch(fail);
808 
809  __ bind(&loop);
810  Register upper_bits = key;
811  __ lw(upper_bits, MemOperand(src_elements));
812  __ Daddu(src_elements, src_elements, kDoubleSize);
813  // upper_bits: current element's upper 32 bits
814  // src_elements: address of next element's upper 32 bits
815  __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));
816 
817  // Non-hole double, copy value into a heap number.
818  Register heap_number = receiver;
819  Register scratch2 = value;
820  Register scratch3 = t2;
821  __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
822  &gc_required);
823  // heap_number: new heap number
824  // Load mantissa of current element, src_elements
825  // points to the exponent of the next element.
826  __ lw(scratch2, MemOperand(src_elements, -12));
827  __ sw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
828  __ sw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
829  __ mov(scratch2, dst_elements);
830  __ sd(heap_number, MemOperand(dst_elements));
831  __ Daddu(dst_elements, dst_elements, kPointerSize);
832  __ RecordWrite(array,
833  scratch2,
834  heap_number,
835  kRAHasBeenSaved,
836  kDontSaveFPRegs,
837  EMIT_REMEMBERED_SET,
838  OMIT_SMI_CHECK);
839  __ Branch(&entry);
840 
841  // Replace the-hole NaN with the-hole pointer.
842  __ bind(&convert_hole);
843  __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
844  __ sd(scratch2, MemOperand(dst_elements));
845  __ Daddu(dst_elements, dst_elements, kPointerSize);
846 
847  __ bind(&entry);
848  __ Branch(&loop, lt, dst_elements, Operand(dst_end));
849 
850  __ MultiPop(receiver.bit() | target_map.bit() | value.bit() | key.bit());
851  // Replace receiver's backing store with newly created and filled FixedArray.
852  __ sd(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
853  __ RecordWriteField(receiver,
854  JSObject::kElementsOffset,
855  array,
856  scratch,
857  kRAHasBeenSaved,
858  kDontSaveFPRegs,
859  EMIT_REMEMBERED_SET,
860  OMIT_SMI_CHECK);
861  __ pop(ra);
862 
863  __ bind(&only_change_map);
864  // Update receiver's map.
865  __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
866  __ RecordWriteField(receiver,
867  HeapObject::kMapOffset,
868  target_map,
869  scratch,
870  kRAHasNotBeenSaved,
871  kDontSaveFPRegs,
872  OMIT_REMEMBERED_SET,
873  OMIT_SMI_CHECK);
874 }
875 
876 
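// Loads the character at |index| of |string| into |result|. Indirect (cons
// and sliced) strings are unwrapped first; anything that cannot be handled
// inline falls back to |call_runtime|.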
877 void StringCharLoadGenerator::Generate(MacroAssembler* masm,
878  Register string,
879  Register index,
880  Register result,
881  Label* call_runtime) {
882  // Fetch the instance type of the receiver into result register.
883  __ ld(result, FieldMemOperand(string, HeapObject::kMapOffset));
884  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
885 
886  // We need special handling for indirect strings.
887  Label check_sequential;
888  __ And(at, result, Operand(kIsIndirectStringMask));
889  __ Branch(&check_sequential, eq, at, Operand(zero_reg));
890 
891  // Dispatch on the indirect string shape: slice or cons.
892  Label cons_string;
893  __ And(at, result, Operand(kSlicedNotConsMask));
894  __ Branch(&cons_string, eq, at, Operand(zero_reg));
895 
896  // Handle slices.
897  Label indirect_string_loaded;
898  __ ld(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
899  __ ld(string, FieldMemOperand(string, SlicedString::kParentOffset));
900  __ dsra32(at, result, 0);
901  __ Daddu(index, index, at);
902  __ jmp(&indirect_string_loaded);
903 
904  // Handle cons strings.
905  // Check whether the right hand side is the empty string (i.e. if
906  // this is really a flat string in a cons string). If that is not
907  // the case we would rather go to the runtime system now to flatten
908  // the string.
909  __ bind(&cons_string);
910  __ ld(result, FieldMemOperand(string, ConsString::kSecondOffset));
911  __ LoadRoot(at, Heap::kempty_stringRootIndex);
912  __ Branch(call_runtime, ne, result, Operand(at));
913  // Get the first of the two strings and load its instance type.
914  __ ld(string, FieldMemOperand(string, ConsString::kFirstOffset));
915 
916  __ bind(&indirect_string_loaded);
917  __ ld(result, FieldMemOperand(string, HeapObject::kMapOffset));
918  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
919 
920  // Distinguish sequential and external strings. Only these two string
921  // representations can reach here (slices and flat cons strings have been
922  // reduced to the underlying sequential or external string).
923  Label external_string, check_encoding;
924  __ bind(&check_sequential);
925  STATIC_ASSERT(kSeqStringTag == 0);
926  __ And(at, result, Operand(kStringRepresentationMask));
927  __ Branch(&external_string, ne, at, Operand(zero_reg));
928 
929  // Prepare sequential strings.
930  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
931  __ Daddu(string,
932  string,
933  SeqTwoByteString::kHeaderSize - kHeapObjectTag);
934  __ jmp(&check_encoding);
935 
936  // Handle external strings.
937  __ bind(&external_string);
938  if (FLAG_debug_code) {
939  // Assert that we do not have a cons or slice (indirect strings) here.
940  // Sequential strings have already been ruled out.
941  __ And(at, result, Operand(kIsIndirectStringMask));
942  __ Assert(eq, kExternalStringExpectedButNotFound,
943  at, Operand(zero_reg));
944  }
945  // Rule out short external strings.
946  STATIC_ASSERT(kShortExternalStringTag != 0);
947  __ And(at, result, Operand(kShortExternalStringMask));
948  __ Branch(call_runtime, ne, at, Operand(zero_reg));
949  __ ld(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
950 
951  Label one_byte, done;
952  __ bind(&check_encoding);
953  STATIC_ASSERT(kTwoByteStringTag == 0);
954  __ And(at, result, Operand(kStringEncodingMask));
955  __ Branch(&one_byte, ne, at, Operand(zero_reg));
956  // Two-byte string.
957  __ dsll(at, index, 1);
958  __ Daddu(at, string, at);
959  __ lhu(result, MemOperand(at));
960  __ jmp(&done);
961  __ bind(&one_byte);
962  // One-byte string.
963  __ Daddu(at, string, index);
964  __ lbu(result, MemOperand(at));
965  __ bind(&done);
966 }
967 
968 
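// Returns the MemOperand of the index-th double in the math_exp constants
// table, whose base address is expected in |base|.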
969 static MemOperand ExpConstant(int index, Register base) {
970  return MemOperand(base, index * kDoubleSize);
971 }
972 
973 
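// Computes exp(input) into |result| using the tables set up by
// ExternalReference::InitializeMathExpData(); all scratch registers must be
// distinct, and the input/result registers must not alias the scratch ones.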
974 void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
975  DoubleRegister input,
976  DoubleRegister result,
977  DoubleRegister double_scratch1,
978  DoubleRegister double_scratch2,
979  Register temp1,
980  Register temp2,
981  Register temp3) {
982  DCHECK(!input.is(result));
983  DCHECK(!input.is(double_scratch1));
984  DCHECK(!input.is(double_scratch2));
985  DCHECK(!result.is(double_scratch1));
986  DCHECK(!result.is(double_scratch2));
987  DCHECK(!double_scratch1.is(double_scratch2));
988  DCHECK(!temp1.is(temp2));
989  DCHECK(!temp1.is(temp3));
990  DCHECK(!temp2.is(temp3));
991  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
992  DCHECK(!masm->serializer_enabled()); // External references not serializable.
993 
994  Label zero, infinity, done;
995  __ li(temp3, Operand(ExternalReference::math_exp_constants(0)));
996 
997  __ ldc1(double_scratch1, ExpConstant(0, temp3));
998  __ BranchF(&zero, NULL, ge, double_scratch1, input);
999 
1000  __ ldc1(double_scratch2, ExpConstant(1, temp3));
1001  __ BranchF(&infinity, NULL, ge, input, double_scratch2);
1002 
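 // Main path: combine a lookup in the precomputed math_exp_log_table (indexed
 // by the low bits of the scaled input) with a polynomial correction built
 // from the math_exp_constants table.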
1003  __ ldc1(double_scratch1, ExpConstant(3, temp3));
1004  __ ldc1(result, ExpConstant(4, temp3));
1005  __ mul_d(double_scratch1, double_scratch1, input);
1006  __ add_d(double_scratch1, double_scratch1, result);
1007  __ FmoveLow(temp2, double_scratch1);
1008  __ sub_d(double_scratch1, double_scratch1, result);
1009  __ ldc1(result, ExpConstant(6, temp3));
1010  __ ldc1(double_scratch2, ExpConstant(5, temp3));
1011  __ mul_d(double_scratch1, double_scratch1, double_scratch2);
1012  __ sub_d(double_scratch1, double_scratch1, input);
1013  __ sub_d(result, result, double_scratch1);
1014  __ mul_d(double_scratch2, double_scratch1, double_scratch1);
1015  __ mul_d(result, result, double_scratch2);
1016  __ ldc1(double_scratch2, ExpConstant(7, temp3));
1017  __ mul_d(result, result, double_scratch2);
1018  __ sub_d(result, result, double_scratch1);
1019  // Move 1 into double_scratch2 as math_exp_constants_array[8] == 1.
1020  DCHECK(*reinterpret_cast<double*>
1021  (ExternalReference::math_exp_constants(8).address()) == 1);
1022  __ Move(double_scratch2, 1);
1023  __ add_d(result, result, double_scratch2);
1024  __ dsrl(temp1, temp2, 11);
1025  __ Ext(temp2, temp2, 0, 11);
1026  __ Daddu(temp1, temp1, Operand(0x3ff));
1027 
1028  // Must not call ExpConstant() after overwriting temp3!
1029  __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
1030  __ dsll(at, temp2, 3);
1031  __ Daddu(temp3, temp3, Operand(at));
1032  __ lwu(temp2, MemOperand(temp3, 0));
1033  __ lwu(temp3, MemOperand(temp3, kIntSize));
1034  // The first word loaded goes into the lower-numbered register.
1035  if (temp2.code() < temp3.code()) {
1036  __ dsll(at, temp1, 20);
1037  __ Or(temp1, temp3, at);
1038  __ Move(double_scratch1, temp2, temp1);
1039  } else {
1040  __ dsll(at, temp1, 20);
1041  __ Or(temp1, temp2, at);
1042  __ Move(double_scratch1, temp3, temp1);
1043  }
1044  __ mul_d(result, result, double_scratch1);
1045  __ BranchShort(&done);
1046 
1047  __ bind(&zero);
1048  __ Move(result, kDoubleRegZero);
1049  __ BranchShort(&done);
1050 
1051  __ bind(&infinity);
1052  __ ldc1(result, ExpConstant(2, temp3));
1053 
1054  __ bind(&done);
1055 }
1056 
1057 #ifdef DEBUG
1058 // nop(CODE_AGE_MARKER_NOP)
1059 static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
1060 #endif
1061 
1062 
1063 CodeAgingHelper::CodeAgingHelper() {
1064  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
1065  // Since patcher is a large object, allocate it dynamically when needed,
1066  // to avoid overloading the stack in stress conditions.
1067  // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
1068  // the process, before the MIPS simulator ICache is set up.
1069  SmartPointer<CodePatcher> patcher(
1070  new CodePatcher(young_sequence_.start(),
1071  young_sequence_.length() / Assembler::kInstrSize,
1072  CodePatcher::DONT_FLUSH));
1073  PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
1074  patcher->masm()->Push(ra, fp, cp, a1);
1075  patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
1076  patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
1077  patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
1078  patcher->masm()->Daddu(
1079  fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
1080 }
1081 
1082 
1083 #ifdef DEBUG
1084 bool CodeAgingHelper::IsOld(byte* candidate) const {
1085  return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction;
1086 }
1087 #endif
1088 
1089 
1090 bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
1091  bool result = isolate->code_aging_helper()->IsYoung(sequence);
1092  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
1093  return result;
1094 }
1095 
1096 
1097 void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
1098  MarkingParity* parity) {
1099  if (IsYoungSequence(isolate, sequence)) {
1100  *age = kNoAgeCodeAge;
1101  *parity = NO_MARKING_PARITY;
1102  } else {
1103  Address target_address = Assembler::target_address_at(
1104  sequence + Assembler::kInstrSize);
1105  Code* stub = GetCodeFromTargetAddress(target_address);
1106  GetCodeAgeAndParity(stub, age, parity);
1107  }
1108 }
1109 
1110 
1111 void Code::PatchPlatformCodeAge(Isolate* isolate,
1112  byte* sequence,
1113  Code::Age age,
1114  MarkingParity parity) {
1115  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
1116  if (age == kNoAgeCodeAge) {
1117  isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
1118  CpuFeatures::FlushICache(sequence, young_length);
1119  } else {
1120  Code* stub = GetCodeAgeStub(isolate, age, parity);
1121  CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
1122  // Mark this code sequence for FindPlatformCodeAgeSequence().
1123  patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
1124  // Load the stub address into t9 and call it;
1125  // GetCodeAgeAndParity() extracts the stub address from this instruction.
1126  patcher.masm()->li(
1127  t9,
1128  Operand(reinterpret_cast<uint64_t>(stub->instruction_start())),
1129  ADDRESS_LOAD);
1130  patcher.masm()->nop(); // Prevent jalr to jal optimization.
1131  patcher.masm()->jalr(t9, a0);
1132  patcher.masm()->nop(); // Branch delay slot nop.
1133  patcher.masm()->nop(); // Pad the empty space.
1134  }
1135 }
1136 
1137 
1138 #undef __
1139 
1140 } } // namespace v8::internal
1141 
1142 #endif // V8_TARGET_ARCH_MIPS64