hydrogen-bce.cc
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/hydrogen-bce.h"

namespace v8 {
namespace internal {


// We try to "factor up" HBoundsCheck instructions towards the root of the
// dominator tree.
// For now we handle checks where the index is like "exp + int32value".
// If the dominator tree contains a check on "exp + v1" and a later
// (dominated) check on "exp + v2", then if v2 <= v1 we can safely remove the
// second check, and if v2 > v1 we can use v2 in the first check and again
// remove the second.
// To do so we keep a dictionary of all checks where the key is the pair
// "exp, length".
// The class BoundsCheckKey represents this key.
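//
// Illustrative example (added, not part of the original source): in
//   for (var i = 0; i < n; i++) { sum += a[i + 1] + a[i + 4] + a[i + 2]; }
// each element load carries an HBoundsCheck whose index decomposes into the
// shared base "i" plus a constant, so all three checks map to the key
// (i, a.length); once offsets [1, 4] are covered, the check for "i + 2" is
// redundant and is removed.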
class BoundsCheckKey : public ZoneObject {
 public:
  HValue* IndexBase() const { return index_base_; }
  HValue* Length() const { return length_; }

  uint32_t Hash() {
    return static_cast<uint32_t>(index_base_->Hashcode() ^ length_->Hashcode());
  }

  static BoundsCheckKey* Create(Zone* zone,
                                HBoundsCheck* check,
                                int32_t* offset) {
    if (!check->index()->representation().IsSmiOrInteger32()) return NULL;

    HValue* index_base = NULL;
    HConstant* constant = NULL;
    bool is_sub = false;

    if (check->index()->IsAdd()) {
      HAdd* index = HAdd::cast(check->index());
      if (index->left()->IsConstant()) {
        constant = HConstant::cast(index->left());
        index_base = index->right();
      } else if (index->right()->IsConstant()) {
        constant = HConstant::cast(index->right());
        index_base = index->left();
      }
    } else if (check->index()->IsSub()) {
      HSub* index = HSub::cast(check->index());
      is_sub = true;
      if (index->right()->IsConstant()) {
        constant = HConstant::cast(index->right());
        index_base = index->left();
      }
    } else if (check->index()->IsConstant()) {
      index_base = check->block()->graph()->GetConstant0();
      constant = HConstant::cast(check->index());
    }

    if (constant != NULL && constant->HasInteger32Value()) {
      *offset = is_sub ? - constant->Integer32Value()
                       : constant->Integer32Value();
    } else {
      *offset = 0;
      index_base = check->index();
    }

    return new(zone) BoundsCheckKey(index_base, check->length());
  }

 private:
  BoundsCheckKey(HValue* index_base, HValue* length)
      : index_base_(index_base),
        length_(length) { }

  HValue* index_base_;
  HValue* length_;

  DISALLOW_COPY_AND_ASSIGN(BoundsCheckKey);
};


// Data about each HBoundsCheck that can be eliminated or moved.
// It is the "value" in the dictionary indexed by "base-index, length"
// (the key is BoundsCheckKey).
// We scan the code with a dominator tree traversal.
// Traversing the dominator tree we keep a stack (implemented as a singly
// linked list) of "data" for each basic block that contains a relevant check
// with the same key (the dictionary holds the head of the list).
// We also keep all the "data" created for a given basic block in a list, and
// use it to "clean up" the dictionary when backtracking in the dominator tree
// traversal.
// Doing this, each dictionary entry always points directly to the check that
// dominates the code currently being examined.
// We also track the current "offset" of the index expression and use it to
// decide if any check is already "covered" (so it can be removed) or not.
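//
// Illustrative scenario (added, not part of the original source): if block
// B1 dominates block B2, both check the same key, and a new data node is
// created for B2, then while B2 and its dominated blocks are visited the
// dictionary entry for the key points at B2's data, whose
// FatherInDominatorTree() is B1's data; when the traversal backtracks out of
// B2, PostProcessBlock() restores the entry to B1's data.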
class BoundsCheckBbData: public ZoneObject {
 public:
  BoundsCheckKey* Key() const { return key_; }
  int32_t LowerOffset() const { return lower_offset_; }
  int32_t UpperOffset() const { return upper_offset_; }
  HBasicBlock* BasicBlock() const { return basic_block_; }
  HBoundsCheck* LowerCheck() const { return lower_check_; }
  HBoundsCheck* UpperCheck() const { return upper_check_; }
  BoundsCheckBbData* NextInBasicBlock() const { return next_in_bb_; }
  BoundsCheckBbData* FatherInDominatorTree() const { return father_in_dt_; }

  bool OffsetIsCovered(int32_t offset) const {
    return offset >= LowerOffset() && offset <= UpperOffset();
  }

  bool HasSingleCheck() { return lower_check_ == upper_check_; }

  void UpdateUpperOffsets(HBoundsCheck* check, int32_t offset) {
    BoundsCheckBbData* data = FatherInDominatorTree();
    while (data != NULL && data->UpperCheck() == check) {
      DCHECK(data->upper_offset_ < offset);
      data->upper_offset_ = offset;
      data = data->FatherInDominatorTree();
    }
  }

  void UpdateLowerOffsets(HBoundsCheck* check, int32_t offset) {
    BoundsCheckBbData* data = FatherInDominatorTree();
    while (data != NULL && data->LowerCheck() == check) {
      DCHECK(data->lower_offset_ > offset);
      data->lower_offset_ = offset;
      data = data->FatherInDominatorTree();
    }
  }

  // The goal of this method is to modify either upper_offset_ or
  // lower_offset_ so that new_offset is also covered (the covered
  // range grows).
  //
  // The precondition is that new_check follows UpperCheck() and
  // LowerCheck() in the same basic block, and that new_offset is not
  // covered (otherwise we could simply remove new_check).
  //
  // If HasSingleCheck() is true then new_check is added as "second check"
  // (either upper or lower; note that HasSingleCheck() becomes false).
  // Otherwise one of the current checks is modified so that it also covers
  // new_offset, and new_check is removed.
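  //
  // Worked example (added, not part of the original source): with a covered
  // range [lower_offset_, upper_offset_] = [0, 3] and two distinct checks,
  // a new check at new_offset == 7 tightens the existing upper check to
  // cover offset 7, propagates 7 to ancestor data via UpdateUpperOffsets(),
  // and deletes new_check.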
  void CoverCheck(HBoundsCheck* new_check,
                  int32_t new_offset) {
    DCHECK(new_check->index()->representation().IsSmiOrInteger32());
    bool keep_new_check = false;

    if (new_offset > upper_offset_) {
      upper_offset_ = new_offset;
      if (HasSingleCheck()) {
        keep_new_check = true;
        upper_check_ = new_check;
      } else {
        TightenCheck(upper_check_, new_check, new_offset);
        UpdateUpperOffsets(upper_check_, upper_offset_);
      }
    } else if (new_offset < lower_offset_) {
      lower_offset_ = new_offset;
      if (HasSingleCheck()) {
        keep_new_check = true;
        lower_check_ = new_check;
      } else {
        TightenCheck(lower_check_, new_check, new_offset);
        UpdateLowerOffsets(lower_check_, lower_offset_);
      }
    } else {
      // Should never have called CoverCheck() in this case.
      UNREACHABLE();
    }

    if (!keep_new_check) {
      if (FLAG_trace_bce) {
        base::OS::Print("Eliminating check #%d after tightening\n",
                        new_check->id());
      }
      new_check->block()->graph()->isolate()->counters()->
          bounds_checks_eliminated()->Increment();
      new_check->DeleteAndReplaceWith(new_check->ActualValue());
    } else {
      HBoundsCheck* first_check = new_check == lower_check_ ? upper_check_
                                                            : lower_check_;
      if (FLAG_trace_bce) {
        base::OS::Print("Moving second check #%d after first check #%d\n",
                        new_check->id(), first_check->id());
      }
      // The length is guaranteed to be live at first_check.
      DCHECK(new_check->length() == first_check->length());
      HInstruction* old_position = new_check->next();
      new_check->Unlink();
      new_check->InsertAfter(first_check);
      MoveIndexIfNecessary(new_check->index(), new_check, old_position);
    }
  }

  BoundsCheckBbData(BoundsCheckKey* key,
                    int32_t lower_offset,
                    int32_t upper_offset,
                    HBasicBlock* bb,
                    HBoundsCheck* lower_check,
                    HBoundsCheck* upper_check,
                    BoundsCheckBbData* next_in_bb,
                    BoundsCheckBbData* father_in_dt)
      : key_(key),
        lower_offset_(lower_offset),
        upper_offset_(upper_offset),
        basic_block_(bb),
        lower_check_(lower_check),
        upper_check_(upper_check),
        next_in_bb_(next_in_bb),
        father_in_dt_(father_in_dt) { }

 private:
  BoundsCheckKey* key_;
  int32_t lower_offset_;
  int32_t upper_offset_;
  HBasicBlock* basic_block_;
  HBoundsCheck* lower_check_;
  HBoundsCheck* upper_check_;
  BoundsCheckBbData* next_in_bb_;
  BoundsCheckBbData* father_in_dt_;

  void MoveIndexIfNecessary(HValue* index_raw,
                            HBoundsCheck* insert_before,
                            HInstruction* end_of_scan_range) {
    // index_raw can be HAdd(index_base, offset), HSub(index_base, offset),
    // HConstant(offset) or index_base directly.
    // In the latter case, no need to move anything.
    if (index_raw->IsAdd() || index_raw->IsSub()) {
      HArithmeticBinaryOperation* index =
          HArithmeticBinaryOperation::cast(index_raw);
      HValue* left_input = index->left();
      HValue* right_input = index->right();
      bool must_move_index = false;
      bool must_move_left_input = false;
      bool must_move_right_input = false;
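      // Added note (not in the original source): the loop below walks
      // backwards from end_of_scan_range to insert_before, hopping to the
      // dominator block's end whenever the current block is exhausted, to
      // detect whether index or its operands are defined in the skipped
      // range and therefore must be moved as well.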
      for (HInstruction* cursor = end_of_scan_range; cursor != insert_before;) {
        if (cursor == left_input) must_move_left_input = true;
        if (cursor == right_input) must_move_right_input = true;
        if (cursor == index) must_move_index = true;
        if (cursor->previous() == NULL) {
          cursor = cursor->block()->dominator()->end();
        } else {
          cursor = cursor->previous();
        }
      }
      if (must_move_index) {
        index->Unlink();
        index->InsertBefore(insert_before);
      }
      // The BCE algorithm only selects mergeable bounds checks that share
      // the same "index_base", so we'll only ever have to move constants.
      if (must_move_left_input) {
        HConstant::cast(left_input)->Unlink();
        HConstant::cast(left_input)->InsertBefore(index);
      }
      if (must_move_right_input) {
        HConstant::cast(right_input)->Unlink();
        HConstant::cast(right_input)->InsertBefore(index);
      }
    } else if (index_raw->IsConstant()) {
      HConstant* index = HConstant::cast(index_raw);
      bool must_move = false;
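      // Added note (not in the original source): same backward scan as in
      // the Add/Sub case above, but here only the constant index itself can
      // require moving.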
      for (HInstruction* cursor = end_of_scan_range; cursor != insert_before;) {
        if (cursor == index) must_move = true;
        if (cursor->previous() == NULL) {
          cursor = cursor->block()->dominator()->end();
        } else {
          cursor = cursor->previous();
        }
      }
      if (must_move) {
        index->Unlink();
        index->InsertBefore(insert_before);
      }
    }
  }

  void TightenCheck(HBoundsCheck* original_check,
                    HBoundsCheck* tighter_check,
                    int32_t new_offset) {
    DCHECK(original_check->length() == tighter_check->length());
    MoveIndexIfNecessary(tighter_check->index(), original_check, tighter_check);
    original_check->ReplaceAllUsesWith(original_check->index());
    original_check->SetOperandAt(0, tighter_check->index());
    if (FLAG_trace_bce) {
      base::OS::Print("Tightened check #%d with offset %d from #%d\n",
                      original_check->id(), new_offset, tighter_check->id());
    }
  }

  DISALLOW_COPY_AND_ASSIGN(BoundsCheckBbData);
};


static bool BoundsCheckKeyMatch(void* key1, void* key2) {
  BoundsCheckKey* k1 = static_cast<BoundsCheckKey*>(key1);
  BoundsCheckKey* k2 = static_cast<BoundsCheckKey*>(key2);
  return k1->IndexBase() == k2->IndexBase() && k1->Length() == k2->Length();
}


BoundsCheckTable::BoundsCheckTable(Zone* zone)
    : ZoneHashMap(BoundsCheckKeyMatch, ZoneHashMap::kDefaultHashMapCapacity,
                  ZoneAllocationPolicy(zone)) { }


BoundsCheckBbData** BoundsCheckTable::LookupOrInsert(BoundsCheckKey* key,
                                                     Zone* zone) {
  return reinterpret_cast<BoundsCheckBbData**>(
      &(Lookup(key, key->Hash(), true, ZoneAllocationPolicy(zone))->value));
}


void BoundsCheckTable::Insert(BoundsCheckKey* key,
                              BoundsCheckBbData* data,
                              Zone* zone) {
  Lookup(key, key->Hash(), true, ZoneAllocationPolicy(zone))->value = data;
}


void BoundsCheckTable::Delete(BoundsCheckKey* key) {
  Remove(key, key->Hash());
}


class HBoundsCheckEliminationState {
 public:
  HBasicBlock* block_;
  BoundsCheckBbData* bb_data_list_;
  int index_;
};


// Eliminates checks in bb and recursively in the dominated blocks.
// Also replaces the results of check instructions with the original value, if
// the result is used. This is safe now, since we don't do code motion after
// this point. It enables better register allocation since the value produced
// by check instructions is really a copy of the original value.
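// Added note (not in the original source): the traversal below uses an
// explicit stack of HBoundsCheckEliminationState entries instead of
// recursion, so deep dominator trees cannot overflow the native call stack.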
void HBoundsCheckEliminationPhase::EliminateRedundantBoundsChecks(
    HBasicBlock* entry) {
  // Allocate the stack.
  HBoundsCheckEliminationState* stack =
      zone()->NewArray<HBoundsCheckEliminationState>(graph()->blocks()->length());

  // Explicitly push the entry block.
  stack[0].block_ = entry;
  stack[0].bb_data_list_ = PreProcessBlock(entry);
  stack[0].index_ = 0;
  int stack_depth = 1;

  // Implement depth-first traversal with a stack.
  while (stack_depth > 0) {
    int current = stack_depth - 1;
    HBoundsCheckEliminationState* state = &stack[current];
    const ZoneList<HBasicBlock*>* children = state->block_->dominated_blocks();

    if (state->index_ < children->length()) {
      // Recursively visit children blocks.
      HBasicBlock* child = children->at(state->index_++);
      int next = stack_depth++;
      stack[next].block_ = child;
      stack[next].bb_data_list_ = PreProcessBlock(child);
      stack[next].index_ = 0;
    } else {
      // Finished with all children; post process the block.
      PostProcessBlock(state->block_, state->bb_data_list_);
      stack_depth--;
    }
  }
}


BoundsCheckBbData* HBoundsCheckEliminationPhase::PreProcessBlock(
    HBasicBlock* bb) {
  BoundsCheckBbData* bb_data_list = NULL;

  for (HInstructionIterator it(bb); !it.Done(); it.Advance()) {
    HInstruction* i = it.Current();
    if (!i->IsBoundsCheck()) continue;

    HBoundsCheck* check = HBoundsCheck::cast(i);
    int32_t offset;
    BoundsCheckKey* key =
        BoundsCheckKey::Create(zone(), check, &offset);
    if (key == NULL) continue;
    BoundsCheckBbData** data_p = table_.LookupOrInsert(key, zone());
    BoundsCheckBbData* data = *data_p;
    if (data == NULL) {
      bb_data_list = new(zone()) BoundsCheckBbData(key,
                                                   offset,
                                                   offset,
                                                   bb,
                                                   check,
                                                   check,
                                                   bb_data_list,
                                                   NULL);
      *data_p = bb_data_list;
      if (FLAG_trace_bce) {
        base::OS::Print("Fresh bounds check data for block #%d: [%d]\n",
                        bb->block_id(), offset);
      }
    } else if (data->OffsetIsCovered(offset)) {
      bb->graph()->isolate()->counters()->
          bounds_checks_eliminated()->Increment();
      if (FLAG_trace_bce) {
        base::OS::Print("Eliminating bounds check #%d, offset %d is covered\n",
                        check->id(), offset);
      }
      check->DeleteAndReplaceWith(check->ActualValue());
    } else if (data->BasicBlock() == bb) {
      // TODO(jkummerow): I think the following logic would be preferable:
      // if (data->BasicBlock() == bb ||
      //     graph()->use_optimistic_licm() ||
      //     bb->IsLoopSuccessorDominator()) {
      //   data->CoverCheck(check, offset)
      // } else {
      //   /* add pristine BCBbData like in the (data == NULL) case above */
      // }
      // Even better would be: distinguish between read-only dominator-imposed
      // knowledge and modifiable upper/lower checks.
      // What happens currently is that the first bounds check in a dominated
      // block will stay around while any further checks are hoisted out,
      // which doesn't make sense. Investigate/fix this in a future CL.
      data->CoverCheck(check, offset);
    } else if (graph()->use_optimistic_licm() ||
               bb->IsLoopSuccessorDominator()) {
      int32_t new_lower_offset = offset < data->LowerOffset()
          ? offset
          : data->LowerOffset();
      int32_t new_upper_offset = offset > data->UpperOffset()
          ? offset
          : data->UpperOffset();
      bb_data_list = new(zone()) BoundsCheckBbData(key,
                                                   new_lower_offset,
                                                   new_upper_offset,
                                                   bb,
                                                   data->LowerCheck(),
                                                   data->UpperCheck(),
                                                   bb_data_list,
                                                   data);
      if (FLAG_trace_bce) {
        base::OS::Print("Updated bounds check data for block #%d: [%d - %d]\n",
                        bb->block_id(), new_lower_offset, new_upper_offset);
      }
      table_.Insert(key, bb_data_list, zone());
    }
  }

  return bb_data_list;
}


void HBoundsCheckEliminationPhase::PostProcessBlock(
    HBasicBlock* block, BoundsCheckBbData* data) {
  while (data != NULL) {
    if (data->FatherInDominatorTree()) {
      table_.Insert(data->Key(), data->FatherInDominatorTree(), zone());
    } else {
      table_.Delete(data->Key());
    }
    data = data->NextInBasicBlock();
  }
}

} }  // namespace v8::internal