V8 Project
optimizing-compiler-thread.cc
Go to the documentation of this file.
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
6 
7 #include "src/v8.h"
8 
9 #include "src/base/atomicops.h"
10 #include "src/full-codegen.h"
11 #include "src/hydrogen.h"
12 #include "src/isolate.h"
13 #include "src/v8threads.h"
14 
15 namespace v8 {
16 namespace internal {
17 
// NOTE(review): the enclosing definition's signature (original lines 18-20)
// and the loop body (line 24) are elided in this extraction. Presumably this
// is the destructor's debug-only check that the OSR buffer has been drained —
// confirm against the original source file.
 21  if (FLAG_concurrent_osr) {
 22 #ifdef DEBUG
// Walk every slot of the OSR buffer (the per-slot check is elided here).
 23  for (int i = 0; i < osr_buffer_capacity_; i++) {
 25  }
 26 #endif
 28  }
 29 }
30 
31 
// NOTE(review): the signature (original line 32) and a few interior lines
// (38, 41, 47-48, 68) are elided in this extraction. This is the compiler
// thread's main loop: wait for work, honor stop/flush requests, compile.
 33 #ifdef DEBUG
// Record this thread's id so IsOptimizerThread() can identify it later;
// guarded by thread_id_mutex_ because the main thread reads thread_id_.
 34  { base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_);
 35  thread_id_ = ThreadId::Current().ToInteger();
 36  }
 37 #endif
// The background compiler must never allocate on the JS heap or create
// handles; these scope objects enforce that in debug builds.
 39  DisallowHeapAllocation no_allocation;
 40  DisallowHandleAllocation no_handles;
 42 
 43  base::ElapsedTimer total_timer;
 44  if (FLAG_trace_concurrent_recompilation) total_timer.Start();
 45 
 46  while (true) {
 49 
// Artificial delay for testing, controlled by a flag (in milliseconds).
 50  if (FLAG_concurrent_recompilation_delay != 0) {
 51  base::OS::Sleep(FLAG_concurrent_recompilation_delay);
 52  }
 53 
// stop_thread_ is written by the main thread; Acquire_Load pairs with its
// Release_Store to make the flag value visible here.
 54  switch (static_cast<StopFlag>(base::Acquire_Load(&stop_thread_))) {
 55  case CONTINUE:
 56  break;
 57  case STOP:
 58  if (FLAG_trace_concurrent_recompilation) {
 59  time_spent_total_ = total_timer.Elapsed();
 60  }
// Wake the main thread blocked in Stop(), then exit the thread.
 61  stop_semaphore_.Signal();
 62  return;
 63  case FLUSH:
 64  // The main thread is blocked, waiting for the stop semaphore.
 65  { AllowHandleDereference allow_handle_dereference;
 66  FlushInputQueue(true);
 67  }
// NOTE(review): the first line of this Release_Store call (resetting
// stop_thread_ back to CONTINUE) is elided by extraction.
 69  static_cast<base::AtomicWord>(CONTINUE));
 70  stop_semaphore_.Signal();
 71  // Return to start of consumer loop.
 72  continue;
 73  }
 74 
// Time each compilation separately so the useful-work percentage can be
// reported when tracing is enabled.
 75  base::ElapsedTimer compiling_timer;
 76  if (FLAG_trace_concurrent_recompilation) compiling_timer.Start();
 77 
 78  CompileNext();
 79 
 80  if (FLAG_trace_concurrent_recompilation) {
 81  time_spent_compiling_ += compiling_timer.Elapsed();
 82  }
 83  }
 84 }
85 
86 
// NOTE(review): the signature (original line 87) and the actual dequeue /
// length bookkeeping (lines 90, 92-93) are elided. This is NextInput(): pop
// the next job from the circular input queue, or NULL when it is empty.
 88  base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
// Empty queue: nothing to compile right now.
 89  if (input_queue_length_ == 0) return NULL;
 91  DCHECK_NE(NULL, job);
 94  return job;
 95 }
96 
97 
// NOTE(review): the signature (CompileNext, per the call in Run) and the
// lines fetching the job and invoking the optimizer (originals 98-99, 103,
// 105) are elided by extraction.
 100  DCHECK_NE(NULL, job);
 101 
 102  // The function may have already been optimized by OSR. Simply continue.
// status is only checked in debug builds; USE() silences the release
// compiler's unused-variable warning.
 104  USE(status); // Prevent an unused-variable error in release mode.
 106 
 107  // The function may have already been optimized by OSR. Simply continue.
 108  // Use a mutex to make sure that functions marked for install
 109  // are always also queued.
 110  output_queue_.Enqueue(job);
// Ask the main thread (via its stack guard) to install the finished code.
 111  isolate_->stack_guard()->RequestInstallCode();
 112 }
113 
114 
// Disposes a compile job, optionally restoring the function's unoptimized
// state. NOTE(review): the first line of the signature (original line 115) is
// elided; per the call sites this is DisposeOptimizedCompileJob(
// OptimizedCompileJob* job, bool restore_function_code).
 116  bool restore_function_code) {
 117  // The recompile job is allocated in the CompilationInfo's zone.
 118  CompilationInfo* info = job->info();
 119  if (restore_function_code) {
 120  if (info->is_osr()) {
 121  if (!job->IsWaitingForInstall()) {
 122  // Remove stack check that guards OSR entry on original code.
 123  Handle<Code> code = info->unoptimized_code();
 124  uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
 125  BackEdgeTable::RemoveStackCheck(code, offset);
 126  }
 127  } else {
// Non-OSR: point the closure back at its shared unoptimized code.
 128  Handle<JSFunction> function = info->closure();
 129  function->ReplaceCode(function->shared()->code());
 130  }
 131  }
// Deleting the info frees the job too, since the job lives in info's zone.
 132  delete info;
 133 }
134 
135 
136 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
137  OptimizedCompileJob* job;
138  while ((job = NextInput())) {
139  // This should not block, since we have one signal on the input queue
140  // semaphore corresponding to each element in the input queue.
141  input_queue_semaphore_.Wait();
142  // OSR jobs are dealt with separately.
143  if (!job->info()->is_osr()) {
144  DisposeOptimizedCompileJob(job, restore_function_code);
145  }
146  }
147 }
148 
149 
150 void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
151  OptimizedCompileJob* job;
152  while (output_queue_.Dequeue(&job)) {
153  // OSR jobs are dealt with separately.
154  if (!job->info()->is_osr()) {
155  DisposeOptimizedCompileJob(job, restore_function_code);
156  }
157  }
158 }
159 
160 
161 void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) {
162  for (int i = 0; i < osr_buffer_capacity_; i++) {
163  if (osr_buffer_[i] != NULL) {
164  DisposeOptimizedCompileJob(osr_buffer_[i], restore_function_code);
165  osr_buffer_[i] = NULL;
166  }
167  }
168 }
169 
170 
// NOTE(review): the signature (original line 171) and line 173 — presumably
// the Release_Store that sets stop_thread_ to FLUSH, matching the FLUSH case
// in the consumer loop — are elided by extraction. Confirm against upstream.
 172  DCHECK(!IsOptimizerThread());
// Release any jobs held back by --block-concurrent-recompilation, then wake
// the compiler thread so it can observe the flush request.
 174  if (FLAG_block_concurrent_recompilation) Unblock();
 175  input_queue_semaphore_.Signal();
// Wait until the compiler thread has flushed the input queue and signaled.
 176  stop_semaphore_.Wait();
 177  FlushOutputQueue(true);
 178  if (FLAG_concurrent_osr) FlushOsrBuffer(true);
 179  if (FLAG_trace_concurrent_recompilation) {
 180  PrintF(" ** Flushed concurrent recompilation queues.\n");
 181  }
 182 }
183 
184 
// NOTE(review): the signature (original line 185) and lines 187/196 —
// presumably the Release_Store that sets stop_thread_ to STOP and a comment
// inside the drain loop — are elided by extraction. Confirm against upstream.
 186  DCHECK(!IsOptimizerThread());
 188  if (FLAG_block_concurrent_recompilation) Unblock();
// Wake the compiler thread so it observes STOP, then wait for it to ack.
 189  input_queue_semaphore_.Signal();
 190  stop_semaphore_.Wait();
 191 
 192  if (FLAG_concurrent_recompilation_delay != 0) {
 193  // At this point the optimizing compiler thread's event loop has stopped.
 194  // There is no need for a mutex when reading input_queue_length_.
// Under the artificial-delay testing flag, finish the remaining work on
// this thread instead of discarding it.
 195  while (input_queue_length_ > 0) CompileNext();
 197  } else {
// Normal shutdown: drop all pending and finished jobs without restoring.
 198  FlushInputQueue(false);
 199  FlushOutputQueue(false);
 200  }
 201 
 202  if (FLAG_concurrent_osr) FlushOsrBuffer(false);
 203 
 204  if (FLAG_trace_concurrent_recompilation) {
 205  double percentage = time_spent_compiling_.PercentOf(time_spent_total_);
 206  PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage);
 207  }
 208 
 209  if ((FLAG_trace_osr || FLAG_trace_concurrent_recompilation) &&
 210  FLAG_concurrent_osr) {
 211  PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_);
 212  }
 213 
// Wait for the OS thread to terminate before returning.
 214  Join();
 215 }
216 
217 
// NOTE(review): the signature (original line 218) and line 247 — presumably
// the call obtaining the optimized code handle, likely
// Compiler::GetConcurrentlyOptimizedCode(job) — are elided by extraction.
// Runs on the main thread: installs every finished job from the output queue.
 219  DCHECK(!IsOptimizerThread());
 220  HandleScope handle_scope(isolate_);
 221 
 222  OptimizedCompileJob* job;
 223  while (output_queue_.Dequeue(&job)) {
 224  CompilationInfo* info = job->info();
 225  Handle<JSFunction> function(*info->closure());
 226  if (info->is_osr()) {
 227  if (FLAG_trace_osr) {
 228  PrintF("[COSR - ");
 229  function->ShortPrint();
 230  PrintF(" is ready for install and entry at AST id %d]\n",
 231  info->osr_ast_id().ToInt());
 232  }
// Mark the OSR job as ready; it stays in the OSR buffer until entered.
 233  job->WaitForInstall();
 234  // Remove stack check that guards OSR entry on original code.
 235  Handle<Code> code = info->unoptimized_code();
 236  uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
 237  BackEdgeTable::RemoveStackCheck(code, offset);
 238  } else {
// The function may have been optimized by other means while this job was
// in flight; in that case discard the result instead of installing it.
 239  if (function->IsOptimized()) {
 240  if (FLAG_trace_concurrent_recompilation) {
 241  PrintF(" ** Aborting compilation for ");
 242  function->ShortPrint();
 243  PrintF(" as it has already been optimized.\n");
 244  }
 245  DisposeOptimizedCompileJob(job, false);
 246  } else {
// Install the optimized code, or fall back to the shared unoptimized
// code if code generation failed (null handle).
 248  function->ReplaceCode(
 249  code.is_null() ? function->shared()->code() : *code);
 250  }
 251  }
 252  }
 253 }
254 
255 
// NOTE(review): the signature (original line 257) and the queue bookkeeping
// lines (265, 267, 269, 273-275 — presumably capacity DCHECKs, the shift_
// adjustment, the tail store, and the length increments) are elided by
// extraction. Confirm against upstream before relying on the details.
 258  DCHECK(!IsOptimizerThread());
 259  CompilationInfo* info = job->info();
 260  if (info->is_osr()) {
 261  osr_attempts_++;
// OSR jobs are also tracked in the OSR buffer so entry can find them.
 262  AddToOsrBuffer(job);
 263  // Add job to the front of the input queue.
 264  base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
 266  // Move shift_ back by one.
 268  input_queue_[InputQueueIndex(0)] = job;
 270  } else {
 271  // Add job to the back of the input queue.
 272  base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
 276  }
// Under --block-concurrent-recompilation, defer waking the compiler thread
// until Unblock() is called; otherwise signal one unit of work now.
 277  if (FLAG_block_concurrent_recompilation) {
 278  blocked_jobs_++;
 279  } else {
 280  input_queue_semaphore_.Signal();
 281  }
 282 }
283 
284 
// NOTE(review): the signature (original line 285) is elided; per the member
// index this is Unblock(). Releases every job that was held back by
// --block-concurrent-recompilation by posting one semaphore signal per job.
 286  DCHECK(!IsOptimizerThread());
 287  while (blocked_jobs_ > 0) {
 288  input_queue_semaphore_.Signal();
 289  blocked_jobs_--;
 290  }
 291 }
292 
293 
// NOTE(review): the first line of the signature (original line 294) is
// elided; per the member index this is FindReadyOSRCandidate. Returns the
// buffered OSR job matching (function, osr_ast_id) that is ready to install,
// removing it from the buffer; returns NULL if none is ready.
 295  Handle<JSFunction> function, BailoutId osr_ast_id) {
 296  DCHECK(!IsOptimizerThread());
 297  for (int i = 0; i < osr_buffer_capacity_; i++) {
 298  OptimizedCompileJob* current = osr_buffer_[i];
 299  if (current != NULL &&
 300  current->IsWaitingForInstall() &&
 301  current->info()->HasSameOsrEntry(function, osr_ast_id)) {
 302  osr_hits_++;
// Hand ownership of the job to the caller; clear the buffer slot.
 303  osr_buffer_[i] = NULL;
 304  return current;
 305  }
 306  }
 307  return NULL;
 308 }
309 
310 
// NOTE(review): the first line of the signature (original line 311) is
// elided; per the member index this is IsQueuedForOSR(Handle<JSFunction>,
// BailoutId). True while a matching OSR job is buffered but not yet ready
// for install (i.e. still compiling).
 312  BailoutId osr_ast_id) {
 313  DCHECK(!IsOptimizerThread());
 314  for (int i = 0; i < osr_buffer_capacity_; i++) {
 315  OptimizedCompileJob* current = osr_buffer_[i];
 316  if (current != NULL &&
 317  current->info()->HasSameOsrEntry(function, osr_ast_id)) {
 318  return !current->IsWaitingForInstall();
 319  }
 320  }
 321  return false;
 322 }
323 
324 
// NOTE(review): the signature (original line 325) is elided; presumably the
// JSFunction* overload of IsQueuedForOSR, matching by closure identity
// instead of (function, ast id). True while the job is still compiling.
 326  DCHECK(!IsOptimizerThread());
 327  for (int i = 0; i < osr_buffer_capacity_; i++) {
 328  OptimizedCompileJob* current = osr_buffer_[i];
 329  if (current != NULL && *current->info()->closure() == function) {
 330  return !current->IsWaitingForInstall();
 331  }
 332  }
 333  return false;
 334 }
335 
336 
// NOTE(review): the signature (original line 337) and the cursor-handling
// lines (342, 344, 358-359 — presumably reading osr_buffer_[cursor],
// advancing the cursor modulo capacity, and storing the new job) are elided
// by extraction. Confirm against upstream before relying on the details.
 338  DCHECK(!IsOptimizerThread());
 339  // Find the next slot that is empty or has a stale job.
 340  OptimizedCompileJob* stale = NULL;
 341  while (true) {
// A slot is usable when it is empty or its job is only waiting for an OSR
// entry that may never happen (stale).
 343  if (stale == NULL || stale->IsWaitingForInstall()) break;
 345  }
 346 
 347  // Add to found slot and dispose the evicted job.
 348  if (stale != NULL) {
 349  DCHECK(stale->IsWaitingForInstall());
 350  CompilationInfo* info = stale->info();
 351  if (FLAG_trace_osr) {
 352  PrintF("[COSR - Discarded ");
 353  info->closure()->PrintName();
 354  PrintF(", AST id %d]\n", info->osr_ast_id().ToInt());
 355  }
 356  DisposeOptimizedCompileJob(stale, false);
 357  }
 360 }
361 
362 
363 #ifdef DEBUG
364 bool OptimizingCompilerThread::IsOptimizerThread(Isolate* isolate) {
365  return isolate->concurrent_recompilation_enabled() &&
366  isolate->optimizing_compiler_thread()->IsOptimizerThread();
367 }
368 
369 
370 bool OptimizingCompilerThread::IsOptimizerThread() {
371  base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_);
372  return ThreadId::Current().ToInteger() == thread_id_;
373 }
374 #endif
375 
376 
377 } } // namespace v8::internal
static void Sleep(const int milliseconds)
static void RemoveStackCheck(Handle< Code > code, uint32_t pc_offset)
int ToInt() const
Definition: utils.h:958
BailoutId osr_ast_id() const
Definition: compiler.h:128
Handle< Code > unoptimized_code() const
Definition: compiler.h:129
bool HasSameOsrEntry(Handle< JSFunction > function, BailoutId osr_ast_id)
Definition: compiler.h:382
Handle< JSFunction > closure() const
Definition: compiler.h:111
static Handle< Code > GetConcurrentlyOptimizedCode(OptimizedCompileJob *job)
Definition: compiler.cc:1354
bool is_null() const
Definition: handles.h:124
StackGuard * stack_guard()
Definition: isolate.h:872
static void SetIsolateThreadLocals(Isolate *isolate, PerIsolateThreadData *data)
Definition: isolate.cc:1676
OptimizingCompilerThread * optimizing_compiler_thread()
Definition: isolate.h:1059
bool concurrent_recompilation_enabled()
Definition: isolate.h:1045
MUST_USE_RESULT Status OptimizeGraph()
Definition: compiler.cc:441
CompilationInfo * info() const
Definition: compiler.h:601
void FlushInputQueue(bool restore_function_code)
UnboundQueue< OptimizedCompileJob * > output_queue_
void AddToOsrBuffer(OptimizedCompileJob *compiler)
bool IsQueuedForOSR(Handle< JSFunction > function, BailoutId osr_ast_id)
void FlushOsrBuffer(bool restore_function_code)
void FlushOutputQueue(bool restore_function_code)
OptimizedCompileJob * FindReadyOSRCandidate(Handle< JSFunction > function, BailoutId osr_ast_id)
void QueueForOptimization(OptimizedCompileJob *optimizing_compiler)
int ToInteger() const
Definition: isolate.h:202
static ThreadId Current()
Definition: isolate.h:185
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be NULL
#define CHECK_EQ(expected, value)
Definition: logging.h:169
#define DCHECK_NE(v1, v2)
Definition: logging.h:207
#define DCHECK(condition)
Definition: logging.h:205
#define DCHECK_LT(v1, v2)
Definition: logging.h:209
#define DCHECK_EQ(v1, v2)
Definition: logging.h:206
void USE(T)
Definition: macros.h:322
intptr_t AtomicWord
Definition: atomicops.h:57
Atomic32 Acquire_Load(volatile const Atomic32 *ptr)
void Release_Store(volatile Atomic32 *ptr, Atomic32 value)
void DeleteArray(T *array)
Definition: allocation.h:68
void PrintF(const char *format,...)
Definition: utils.cc:80
static void DisposeOptimizedCompileJob(OptimizedCompileJob *job, bool restore_function_code)
Debugger support for the V8 JavaScript engine.
Definition: accessors.cc:20