/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
   Copyright 2022 The StableHLO Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "dialect/BroadcastUtils.h"

#include <algorithm>

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Shape/IR/Shape.h"

namespace mlir {
namespace hlo {

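// Returns true if `lhs`, `rhs`, and `broadcastDims` describe a legal
// numpy-style ranked broadcast. Both operands must be ranked; when their
// ranks differ, `broadcastDims` must map the lower-ranked operand onto the
// trailing dimensions of the higher-ranked one (strict left-padding). E.g.
// for lhs : tensor<2x3xf32> and rhs : tensor<3xf32>, broadcast dimensions
// [1] are legal while [0] are not.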
bool isLegalNumpyRankedBroadcast(Value lhs, Value rhs,
                                 DenseIntElementsAttr broadcastDims) {
  RankedTensorType lhsType = lhs.getType().dyn_cast<RankedTensorType>();
  RankedTensorType rhsType = rhs.getType().dyn_cast<RankedTensorType>();
  if (!lhsType || !rhsType) return false;
  if (lhsType.getRank() == rhsType.getRank()) return true;

  // Otherwise, verify that broadcast_dims strictly performs left-padding.
  auto smallerRank = std::min(lhsType.getRank(), rhsType.getRank());
  auto largerRank = std::max(lhsType.getRank(), rhsType.getRank());

  if (smallerRank != broadcastDims.getNumElements()) {
    return false;
  }
  auto expectedExtents =
      llvm::seq<int64_t>(largerRank - smallerRank, largerRank);
  return std::equal(expectedExtents.begin(), expectedExtents.end(),
                    broadcastDims.value_begin<APInt>());
}

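// Convenience wrapper around the n-ary variant below for the common binary
// (lhs/rhs) case.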
Value computeBinaryElementwiseBroadcastingResultExtents(Location loc, Value lhs,
                                                        Value rhs,
                                                        OpBuilder& builder) {
  return computeNaryElementwiseBroadcastingResultExtents(
      loc, ValueRange{lhs, rhs}, builder);
}

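// Computes an extent tensor holding the broadcasted result shape of the given
// operands, e.g. tensor<2xindex> for operands of types tensor<?x3xf32> and
// tensor<3xf32>.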
Value computeNaryElementwiseBroadcastingResultExtents(Location loc,
                                                      ValueRange operands,
                                                      OpBuilder& builder) {
  auto shapes = llvm::to_vector<4>(llvm::map_range(operands, [&](Value v) {
    return builder.createOrFold<shape::ShapeOfOp>(loc, v);
  }));

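  // Find the result rank: if any operand's extent tensor has a dynamic size,
  // the result rank is unknown; otherwise it is the largest static size.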
  int64_t resultRank = 0;
  for (Value s : shapes) {
    auto ty = s.getType().cast<RankedTensorType>();
    assert(ty.getRank() == 1 && "expect extent tensor type");
    if (ty.isDynamicDim(0)) {
      resultRank = ShapedType::kDynamicSize;
      break;
    }
    resultRank = std::max(resultRank, ty.getDimSize(0));
  }
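  // The result type is an extent tensor, i.e. tensor<Nxindex>, or
  // tensor<?xindex> when the rank is dynamic.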
  Type extentTensorTy =
      shape::getExtentTensorType(builder.getContext(), resultRank);

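  // Materialize the combined extents with shape.broadcast; no error value is
  // threaded through, so the operands are assumed to be broadcastable.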
  return builder.createOrFold<shape::BroadcastOp>(loc, extentTensorTy, shapes,
                                                  /*error=*/nullptr);
}

}  // namespace hlo
}  // namespace mlir