| 1 | //===- Block.cpp - MLIR Block Class ---------------------------------------===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | |
| 9 | #include "mlir/IR/Block.h" |
| 10 | |
| 11 | #include "mlir/IR/Builders.h" |
| 12 | #include "mlir/IR/Operation.h" |
| 13 | |
| 14 | using namespace mlir; |
| 15 | |
| 16 | //===----------------------------------------------------------------------===// |
| 17 | // Block |
| 18 | //===----------------------------------------------------------------------===// |
| 19 | |
Block::~Block() {
  // verifyOpOrder() returns true only when an inconsistency is detected, so a
  // well-formed block must yield false here.
  assert(!verifyOpOrder() && "Expected valid operation ordering." );
  // Erase all operations first so that any uses of the block arguments are
  // dropped before the arguments themselves are destroyed.
  clear();
  for (BlockArgument arg : arguments)
    arg.destroy();
}
| 26 | |
| 27 | Region *Block::getParent() const { return parentValidOpOrderPair.getPointer(); } |
| 28 | |
| 29 | /// Returns the closest surrounding operation that contains this block or |
| 30 | /// nullptr if this block is unlinked. |
| 31 | Operation *Block::getParentOp() { |
| 32 | return getParent() ? getParent()->getParentOp() : nullptr; |
| 33 | } |
| 34 | |
/// Return if this block is the entry block in the parent region. The block
/// must be linked into a region (getParent() must be non-null).
bool Block::isEntryBlock() { return this == &getParent()->front(); }
| 37 | |
| 38 | /// Insert this block (which must not already be in a region) right before the |
| 39 | /// specified block. |
| 40 | void Block::insertBefore(Block *block) { |
| 41 | assert(!getParent() && "already inserted into a block!" ); |
| 42 | assert(block->getParent() && "cannot insert before a block without a parent" ); |
| 43 | block->getParent()->getBlocks().insert(where: block->getIterator(), New: this); |
| 44 | } |
| 45 | |
| 46 | void Block::insertAfter(Block *block) { |
| 47 | assert(!getParent() && "already inserted into a block!" ); |
| 48 | assert(block->getParent() && "cannot insert before a block without a parent" ); |
| 49 | block->getParent()->getBlocks().insertAfter(where: block->getIterator(), New: this); |
| 50 | } |
| 51 | |
| 52 | /// Unlink this block from its current region and insert it right before the |
| 53 | /// specific block. |
| 54 | void Block::moveBefore(Block *block) { |
| 55 | assert(block->getParent() && "cannot insert before a block without a parent" ); |
| 56 | moveBefore(region: block->getParent(), iterator: block->getIterator()); |
| 57 | } |
| 58 | |
/// Unlink this block from its current region and insert it right before the
/// block that the given iterator points to in the given region.
void Block::moveBefore(Region *region, llvm::iplist<Block>::iterator iterator) {
  // splice transfers this block between the intrusive lists without any
  // allocation; this block must currently be linked (getParent() non-null).
  region->getBlocks().splice(where: iterator, L2&: getParent()->getBlocks(), first: getIterator());
}
| 64 | |
/// Unlink this Block from its parent Region and delete it.
void Block::erase() {
  assert(getParent() && "Block has no parent" );
  // iplist::erase both unlinks the block and invokes its destructor.
  getParent()->getBlocks().erase(IT: this);
}
| 70 | |
| 71 | /// Returns 'op' if 'op' lies in this block, or otherwise finds the |
| 72 | /// ancestor operation of 'op' that lies in this block. Returns nullptr if |
| 73 | /// the latter fails. |
| 74 | Operation *Block::findAncestorOpInBlock(Operation &op) { |
| 75 | // Traverse up the operation hierarchy starting from the owner of operand to |
| 76 | // find the ancestor operation that resides in the block of 'forOp'. |
| 77 | auto *currOp = &op; |
| 78 | while (currOp->getBlock() != this) { |
| 79 | currOp = currOp->getParentOp(); |
| 80 | if (!currOp) |
| 81 | return nullptr; |
| 82 | } |
| 83 | return currOp; |
| 84 | } |
| 85 | |
| 86 | /// This drops all operand uses from operations within this block, which is |
| 87 | /// an essential step in breaking cyclic dependences between references when |
| 88 | /// they are to be deleted. |
| 89 | void Block::dropAllReferences() { |
| 90 | for (Operation &i : *this) |
| 91 | i.dropAllReferences(); |
| 92 | } |
| 93 | |
| 94 | void Block::dropAllDefinedValueUses() { |
| 95 | for (auto arg : getArguments()) |
| 96 | arg.dropAllUses(); |
| 97 | for (auto &op : *this) |
| 98 | op.dropAllDefinedValueUses(); |
| 99 | dropAllUses(); |
| 100 | } |
| 101 | |
/// Returns true if the ordering of the child operations is valid, false
/// otherwise. The flag is stored in the int half of parentValidOpOrderPair.
bool Block::isOpOrderValid() { return parentValidOpOrderPair.getInt(); }
| 105 | |
/// Invalidates the current ordering of operations.
void Block::invalidateOpOrder() {
  // Sanity-check the current ordering before discarding it; verifyOpOrder()
  // returns true only when an inconsistency is found.
  assert(!verifyOpOrder());
  parentValidOpOrderPair.setInt(false);
}
| 112 | |
| 113 | /// Verifies the current ordering of child operations. Returns false if the |
| 114 | /// order is valid, true otherwise. |
| 115 | bool Block::verifyOpOrder() { |
| 116 | // The order is already known to be invalid. |
| 117 | if (!isOpOrderValid()) |
| 118 | return false; |
| 119 | // The order is valid if there are less than 2 operations. |
| 120 | if (operations.empty() || llvm::hasSingleElement(C&: operations)) |
| 121 | return false; |
| 122 | |
| 123 | Operation *prev = nullptr; |
| 124 | for (auto &i : *this) { |
| 125 | // The previous operation must have a smaller order index than the next as |
| 126 | // it appears earlier in the list. |
| 127 | if (prev && prev->orderIndex != Operation::kInvalidOrderIdx && |
| 128 | prev->orderIndex >= i.orderIndex) |
| 129 | return true; |
| 130 | prev = &i; |
| 131 | } |
| 132 | return false; |
| 133 | } |
| 134 | |
| 135 | /// Recomputes the ordering of child operations within the block. |
| 136 | void Block::recomputeOpOrder() { |
| 137 | parentValidOpOrderPair.setInt(true); |
| 138 | |
| 139 | unsigned orderIndex = 0; |
| 140 | for (auto &op : *this) |
| 141 | op.orderIndex = (orderIndex += Operation::kOrderStride); |
| 142 | } |
| 143 | |
| 144 | //===----------------------------------------------------------------------===// |
| 145 | // Argument list management. |
| 146 | //===----------------------------------------------------------------------===// |
| 147 | |
/// Return a range containing the types of the arguments for this block. The
/// range is a lazy view over the argument list; it does not copy the types.
auto Block::getArgumentTypes() -> ValueTypeRange<BlockArgListType> {
  return ValueTypeRange<BlockArgListType>(getArguments());
}
| 152 | |
/// Append a new argument of `type` with source location `loc` to the argument
/// list and return it. The new argument's number is the previous list size.
BlockArgument Block::addArgument(Type type, Location loc) {
  BlockArgument arg = BlockArgument::create(type, owner: this, index: arguments.size(), loc);
  arguments.push_back(x: arg);
  return arg;
}
| 158 | |
| 159 | /// Add one argument to the argument list for each type specified in the list. |
| 160 | auto Block::addArguments(TypeRange types, ArrayRef<Location> locs) |
| 161 | -> iterator_range<args_iterator> { |
| 162 | assert(types.size() == locs.size() && |
| 163 | "incorrect number of block argument locations" ); |
| 164 | size_t initialSize = arguments.size(); |
| 165 | arguments.reserve(n: initialSize + types.size()); |
| 166 | |
| 167 | for (auto typeAndLoc : llvm::zip(t&: types, u&: locs)) |
| 168 | addArgument(type: std::get<0>(t&: typeAndLoc), loc: std::get<1>(t&: typeAndLoc)); |
| 169 | return {arguments.data() + initialSize, arguments.data() + arguments.size()}; |
| 170 | } |
| 171 | |
| 172 | BlockArgument Block::insertArgument(unsigned index, Type type, Location loc) { |
| 173 | assert(index <= arguments.size() && "invalid insertion index" ); |
| 174 | |
| 175 | auto arg = BlockArgument::create(type, owner: this, index, loc); |
| 176 | arguments.insert(position: arguments.begin() + index, x: arg); |
| 177 | // Update the cached position for all the arguments after the newly inserted |
| 178 | // one. |
| 179 | ++index; |
| 180 | for (BlockArgument arg : llvm::drop_begin(RangeOrContainer&: arguments, N: index)) |
| 181 | arg.setArgNumber(index++); |
| 182 | return arg; |
| 183 | } |
| 184 | |
/// Insert one value to the given position of the argument list. The existing
/// arguments are shifted. The block is expected not to have predecessors
/// (otherwise predecessors' branch operands would go out of sync).
BlockArgument Block::insertArgument(args_iterator it, Type type, Location loc) {
  assert(getPredecessors().empty() &&
         "cannot insert arguments to blocks with predecessors" );
  // `it` must point at an existing argument; its number is the insert index.
  return insertArgument(index: it->getArgNumber(), type, loc);
}
| 192 | |
| 193 | void Block::eraseArgument(unsigned index) { |
| 194 | assert(index < arguments.size()); |
| 195 | arguments[index].destroy(); |
| 196 | arguments.erase(position: arguments.begin() + index); |
| 197 | for (BlockArgument arg : llvm::drop_begin(RangeOrContainer&: arguments, N: index)) |
| 198 | arg.setArgNumber(index++); |
| 199 | } |
| 200 | |
| 201 | void Block::eraseArguments(unsigned start, unsigned num) { |
| 202 | assert(start + num <= arguments.size()); |
| 203 | for (unsigned i = 0; i < num; ++i) |
| 204 | arguments[start + i].destroy(); |
| 205 | arguments.erase(first: arguments.begin() + start, last: arguments.begin() + start + num); |
| 206 | for (BlockArgument arg : llvm::drop_begin(RangeOrContainer&: arguments, N: start)) |
| 207 | arg.setArgNumber(start++); |
| 208 | } |
| 209 | |
/// Erase every argument whose current number has its bit set in
/// `eraseIndices`, by forwarding to the predicate-based overload.
void Block::eraseArguments(const BitVector &eraseIndices) {
  eraseArguments(
      shouldEraseFn: [&](BlockArgument arg) { return eraseIndices.test(Idx: arg.getArgNumber()); });
}
| 214 | |
/// Erase all arguments for which `shouldEraseFn` returns true, compacting the
/// survivors in place in a single pass and renumbering them contiguously.
void Block::eraseArguments(function_ref<bool(BlockArgument)> shouldEraseFn) {
  auto firstDead = llvm::find_if(Range&: arguments, P: shouldEraseFn);
  if (firstDead == arguments.end())
    return;

  // Destroy the first dead argument, this avoids reapplying the predicate to
  // it.
  unsigned index = firstDead->getArgNumber();
  firstDead->destroy();

  // Iterate the remaining arguments to remove any that are now dead.
  for (auto it = std::next(x: firstDead), e = arguments.end(); it != e; ++it) {
    // Destroy dead arguments, and shift those that are still live.
    if (shouldEraseFn(*it)) {
      it->destroy();
    } else {
      // Live argument: assign its new compacted number and slide it down over
      // the earliest dead slot; `firstDead` tracks the write position.
      it->setArgNumber(index++);
      *firstDead++ = *it;
    }
  }
  // Trim the stale tail left behind by the compaction.
  arguments.erase(first: firstDead, last: arguments.end());
}
| 237 | |
| 238 | //===----------------------------------------------------------------------===// |
| 239 | // Terminator management |
| 240 | //===----------------------------------------------------------------------===// |
| 241 | |
/// Get the terminator operation of this block. This function asserts that
/// the block might have a valid terminator operation (see
/// mightHaveTerminator); the terminator is always the last operation.
Operation *Block::getTerminator() {
  assert(mightHaveTerminator());
  return &back();
}
| 248 | |
/// Check whether this block might have a terminator: it is non-empty and its
/// last operation has (or, if unverifiable, might have) the IsTerminator
/// trait.
bool Block::mightHaveTerminator() {
  return !empty() && back().mightHaveTrait<OpTrait::IsTerminator>();
}
| 253 | |
| 254 | // Indexed successor access. |
| 255 | unsigned Block::getNumSuccessors() { |
| 256 | return empty() ? 0 : back().getNumSuccessors(); |
| 257 | } |
| 258 | |
/// Return the i-th successor of this block's terminator.
Block *Block::getSuccessor(unsigned i) {
  assert(i < getNumSuccessors());
  return getTerminator()->getSuccessor(index: i);
}
| 263 | |
| 264 | /// If this block has exactly one predecessor, return it. Otherwise, return |
| 265 | /// null. |
| 266 | /// |
| 267 | /// Note that multiple edges from a single block (e.g. if you have a cond |
| 268 | /// branch with the same block as the true/false destinations) is not |
| 269 | /// considered to be a single predecessor. |
| 270 | Block *Block::getSinglePredecessor() { |
| 271 | auto it = pred_begin(); |
| 272 | if (it == pred_end()) |
| 273 | return nullptr; |
| 274 | auto *firstPred = *it; |
| 275 | ++it; |
| 276 | return it == pred_end() ? firstPred : nullptr; |
| 277 | } |
| 278 | |
| 279 | /// If this block has a unique predecessor, i.e., all incoming edges originate |
| 280 | /// from one block, return it. Otherwise, return null. |
| 281 | Block *Block::getUniquePredecessor() { |
| 282 | auto it = pred_begin(), e = pred_end(); |
| 283 | if (it == e) |
| 284 | return nullptr; |
| 285 | |
| 286 | // Check for any conflicting predecessors. |
| 287 | auto *firstPred = *it; |
| 288 | for (++it; it != e; ++it) |
| 289 | if (*it != firstPred) |
| 290 | return nullptr; |
| 291 | return firstPred; |
| 292 | } |
| 293 | |
| 294 | //===----------------------------------------------------------------------===// |
| 295 | // Other |
| 296 | //===----------------------------------------------------------------------===// |
| 297 | |
/// Split the block into two blocks before the specified operation or
/// iterator.
///
/// Note that all operations BEFORE the specified iterator stay as part of
/// the original basic block, and the rest of the operations in the original
/// block are moved to the new block, including the old terminator. The
/// original block is left without a terminator.
///
/// The newly formed Block is returned, and the specified iterator is
/// invalidated.
Block *Block::splitBlock(iterator splitBefore) {
  // Start by creating a new basic block, and insert it immediate after this
  // one in the containing region.
  auto *newBB = new Block();
  getParent()->getBlocks().insert(where: std::next(x: Region::iterator(this)), New: newBB);

  // Move all of the operations from the split point to the end of the region
  // into the new block. splice transfers ownership without copying.
  newBB->getOperations().splice(where: newBB->end(), L2&: getOperations(), first: splitBefore,
                                last: end());
  return newBB;
}
| 320 | |
| 321 | //===----------------------------------------------------------------------===// |
| 322 | // Predecessors |
| 323 | //===----------------------------------------------------------------------===// |
| 324 | |
/// Map a use of this block (a BlockOperand on some terminator) to the block
/// that owns the using terminator, i.e. the predecessor block.
Block *PredecessorIterator::unwrap(BlockOperand &value) {
  return value.getOwner()->getBlock();
}
| 328 | |
/// Get the successor number in the predecessor terminator, i.e. which of the
/// terminator's successor slots refers to the iterated block.
unsigned PredecessorIterator::getSuccessorIndex() const {
  return I->getOperandNumber();
}
| 333 | |
| 334 | //===----------------------------------------------------------------------===// |
| 335 | // Successors |
| 336 | //===----------------------------------------------------------------------===// |
| 337 | |
| 338 | SuccessorRange::SuccessorRange() : SuccessorRange(nullptr, 0) {} |
| 339 | |
/// Construct the range of successors of `block`. Yields an empty range when
/// the block is empty or is the sole block of its region.
/// NOTE(review): the hasSingleElement fast path presumably relies on a
/// single-block region having no intra-region branch targets — confirm this
/// invariant against Block/Region documentation before depending on it.
SuccessorRange::SuccessorRange(Block *block) : SuccessorRange() {
  if (block->empty() || llvm::hasSingleElement(C&: *block->getParent()))
    return;
  Operation *term = &block->back();
  if ((count = term->getNumSuccessors()))
    base = term->getBlockOperands().data();
}
| 347 | |
/// Construct the range of successor blocks referenced by `term`; empty when
/// the operation has no successors.
SuccessorRange::SuccessorRange(Operation *term) : SuccessorRange() {
  if ((count = term->getNumSuccessors()))
    base = term->getBlockOperands().data();
}
| 352 | |
| 353 | bool Block::isReachable(Block *other, SmallPtrSet<Block *, 16> &&except) { |
| 354 | assert(getParent() == other->getParent() && "expected same region" ); |
| 355 | if (except.contains(Ptr: other)) { |
| 356 | // Fast path: If `other` is in the `except` set, there can be no path from |
| 357 | // "this" to `other` (that does not pass through an excluded block). |
| 358 | return false; |
| 359 | } |
| 360 | SmallVector<Block *> worklist(succ_begin(), succ_end()); |
| 361 | while (!worklist.empty()) { |
| 362 | Block *next = worklist.pop_back_val(); |
| 363 | if (next == other) |
| 364 | return true; |
| 365 | // Note: `except` keeps track of already visited blocks. |
| 366 | if (!except.insert(Ptr: next).second) |
| 367 | continue; |
| 368 | worklist.append(in_start: next->succ_begin(), in_end: next->succ_end()); |
| 369 | } |
| 370 | return false; |
| 371 | } |
| 372 | |
| 373 | //===----------------------------------------------------------------------===// |
| 374 | // BlockRange |
| 375 | //===----------------------------------------------------------------------===// |
| 376 | |
/// Construct a range over an explicit list of blocks; empty input yields an
/// empty range with a null base.
BlockRange::BlockRange(ArrayRef<Block *> blocks) : BlockRange(nullptr, 0) {
  if ((count = blocks.size()))
    base = blocks.data();
}
| 381 | |
/// Construct a block range that views the BlockOperands underlying a
/// successor range.
BlockRange::BlockRange(SuccessorRange successors)
    : BlockRange(successors.begin().getBase(), successors.size()) {}
| 384 | |
/// See `llvm::detail::indexed_accessor_range_base` for details.
/// Advance the owner pointer by `index` elements; OwnerT is a union of either
/// a BlockOperand array (successor ranges) or a Block* array (explicit lists).
BlockRange::OwnerT BlockRange::offset_base(OwnerT object, ptrdiff_t index) {
  if (auto *operand = llvm::dyn_cast_if_present<BlockOperand *>(Val&: object))
    return {operand + index};
  return {llvm::dyn_cast_if_present<Block *const *>(Val&: object) + index};
}
| 391 | |
/// See `llvm::detail::indexed_accessor_range_base` for details.
/// Dereference element `index` of the owner: unwrap the BlockOperand for
/// successor-backed ranges, or index the raw Block* array otherwise.
Block *BlockRange::dereference_iterator(OwnerT object, ptrdiff_t index) {
  if (const auto *operand = llvm::dyn_cast_if_present<BlockOperand *>(Val&: object))
    return operand[index].get();
  return llvm::dyn_cast_if_present<Block *const *>(Val&: object)[index];
}
| 398 | |