Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions src/Dialect/Mlir/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ add_onnx_mlir_library(OMMlirDialects

LINK_LIBS PUBLIC
OMCompilerOptions
OMONNXOps
MLIRMathDialect
MLIRAffineDialect
MLIRSCFDialect
Expand Down
4 changes: 4 additions & 0 deletions src/Dialect/Mlir/IndexExpr.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -237,6 +237,8 @@ bool IndexExpr::hasAffineExpr() const { return getObj().hasAffineExpr(); }

bool IndexExpr::hasValue() const { return getObj().hasValue(); }

// True when this index expression carries an onnx dim_param symbol.
bool IndexExpr::hasDimParam() const { return getObj().hasDimParam(); }

//===----------------------------------------------------------------------===//
// IndexExpr list queries.
//===----------------------------------------------------------------------===//
Expand Down Expand Up @@ -413,6 +415,8 @@ AffineExpr IndexExpr::getAffineExpr() const {

Value IndexExpr::getValue() const { return getObj().getValue(); }

// Returns the onnx dim_param symbol attached to this index expression
// ("" when none was recorded).
std::string IndexExpr::getDimParam() const { return getObj().getDimParam(); }

void IndexExpr::getAffineMapAndOperands(
AffineMap &map, SmallVectorImpl<Value> &operands) const {
assert(!isFloat() && "attempt to get affine map of a float index expr");
Expand Down
2 changes: 2 additions & 0 deletions src/Dialect/Mlir/IndexExpr.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -452,6 +452,7 @@ class IndexExpr {
}
bool hasAffineExpr() const;
bool hasValue() const;
bool hasDimParam() const;

// Value/values has/have to be literal and satisfy the test.
bool isLiteralAndIdenticalTo(int b) const; // Values equal.
Expand Down Expand Up @@ -486,6 +487,7 @@ class IndexExpr {
mlir::AffineMap &map, llvm::SmallVectorImpl<mlir::Value> &operands) const;
mlir::Value getValue() const;
int64_t getShape(bool uniqueQuestionMark = false) const;
std::string getDimParam() const;

// Helpers for list of IndexExpressions: given a (list of) IndexExpr, provide
// the (list of) Shape/Value/OpFoldResult corresponding to the original (list
Expand Down
7 changes: 6 additions & 1 deletion src/Dialect/Mlir/IndexExprBuilder.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -239,7 +239,12 @@ IndexExpr IndexExprBuilder::getValFromArray(
else
return DimIE(castedVal);
}
return QuestionmarkIndexExpr(isFloat);

if (isFloat)
return QuestionmarkIndexExpr(isFloat);
else
// Try to get more info from array[i]
return QuestionmarkIndexExpr(array, i);
}

IndexExpr IndexExprBuilder::getIntAsSymbol(Value value) {
Expand Down
136 changes: 135 additions & 1 deletion src/Dialect/Mlir/IndexExprDetail.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,8 @@
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/Debug.h"

#include "src/Dialect/ONNX/ONNXOps.hpp"
#include "src/Dialect/ONNX/ONNXOps/OpHelper.hpp"
#include <mutex>

#define DEBUG_TYPE "index-expr"
Expand All @@ -41,7 +43,7 @@ namespace onnx_mlir {
IndexExprImpl::IndexExprImpl()
: defined(false), literal(false), isFloat(false),
kind(IndexExprKind::NonAffine), intLit(0), affineExpr(nullptr),
value(nullptr) {
value(nullptr), dimParam("") {
// Set scope from thread private global.
scope = IndexExprScope::getCurrentScopePtr();
assert(scope && "expected IndexExpr Scope to be defined");
Expand All @@ -66,6 +68,109 @@ void IndexExprImpl::initAsQuestionmark(int64_t const val, bool isFloatFlag) {
IndexExprKind::Questionmark, val, AffineExpr(nullptr), Value(nullptr));
}

// Parse a dim_params string of the form "<index>:<param>,<index>:<param>,..."
// (e.g. "0:batch,1:seq") and return the param associated with `index`.
// Returns the empty string when `index` has no entry. Malformed entries that
// lack the ':' separator are skipped (previously they made substr/stoi
// misbehave or throw).
std::string getDimParamFromString(std::string dimParams, int64_t index) {
  std::stringstream shapeInfoString(dimParams);
  std::string dimString;
  while (std::getline(shapeInfoString, dimString, ',')) {
    size_t pos = dimString.find(':');
    // Guard: without a separator, both substrings would be the whole token.
    if (pos == std::string::npos)
      continue;
    std::string inputString = dimString.substr(0, pos);
    std::string paramString = dimString.substr(pos + 1);
    // stoll matches the int64_t index domain (stoi would narrow to int).
    int64_t inputID = std::stoll(inputString);
    if (inputID == index)
      return paramString;
  }
  return std::string("");
}

// Get the dim_param for dimension `index` of `tensorOrMemref` by inspecting
// its direct definition site: a func.func argument attribute (FUNC_DIM_PARAMS)
// when the value is a block argument, or an op result attribute
// (OP_DIM_PARAMS + result number) otherwise. Returns "" when no dim_param
// information is available.
std::string getDimParamFromDirectDefiningOpUtil(
    Value tensorOrMemref, int64_t index) {
  if (auto blockArg = llvm::dyn_cast<BlockArgument>(tensorOrMemref)) {
    // Only func.func arguments carry dim_params attributes.
    // ToFix: Loop, If and other region-holding ops are not handled yet.
    Operation *parentOp = blockArg.getOwner()->getParentOp();
    auto funcOp = llvm::dyn_cast_or_null<func::FuncOp>(parentOp);
    if (!funcOp)
      return std::string("");
    DictionaryAttr dictAttr = mlir::function_interface_impl::getArgAttrDict(
        funcOp, blockArg.getArgNumber());
    if (!dictAttr || !dictAttr.contains(FUNC_DIM_PARAMS))
      return std::string("");
    StringAttr dimParamAttr = mlir::cast<StringAttr>(
        dictAttr.getNamed(FUNC_DIM_PARAMS).value().getValue());
    return getDimParamFromString(
        std::string(dimParamAttr.getValue().str()), index);
  }
  // The value is an op result: read the "onnx.out_dim_params_<n>" attribute.
  Operation *definingOp = tensorOrMemref.getDefiningOp();
  if (!definingOp) {
    // func.func parameter?
    return std::string("");
  }
  unsigned resultIndex =
      llvm::cast<OpResult>(tensorOrMemref).getResultNumber();
  Attribute dimParamAttr =
      definingOp->getAttr(OP_DIM_PARAMS + std::to_string(resultIndex));
  if (!dimParamAttr)
    return std::string("");
  return getDimParamFromString(
      std::string(llvm::cast<StringAttr>(dimParamAttr).getValue().str()),
      index);
}

// Initialize a Questionmark with the value of val[index].
// Assume that the existing code already handles the constant case.
// Here a Questionmark is generated, possibly with dimParam info.
// To find the dimParam info, the definition chain of val is inspected.
// The supported pattern is a value coming from a ConcatOp.

// If `val` is produced by an onnx.Dim op, return the dim_param associated
// with the dimension that the Dim op queries, looked up on the Dim op's data
// operand. Returns "" when `val` is not a Dim result or no dim_param exists.
std::string getDimParamForDimOp(Value val) {
  auto dimOp = val.getDefiningOp<ONNXDimOp>();
  if (!dimOp)
    return std::string("");
  // Trace the queried axis on the Dim op's input tensor.
  // (Removed a leftover commented-out debug return of the axis itself.)
  return getDimParamFromDirectDefiningOpUtil(dimOp.getData(), dimOp.getAxis());
}

// For a value built by an onnx.Concat of onnx.Dim results (the typical
// shape-tensor pattern), trace val[index] back to the Dim op that produced it
// and return its dim_param. Returns "" when the pattern does not match.
static std::string getDimParamForIndexedValueUtil(Value val, int64_t index) {
  // Pattern#1: The value comes from Concat. The index can be used to trace
  // back the particular input of Concat.
  // Copy code from src/Dialect/ONNX/ONNXOps/Tensor/Reshape
  if (!areDimsFromConcat(val))
    return std::string("");
  // Question: need to check the shape of input of Concat?
  SmallVector<Value> shapeDimVals;
  getDims(val, shapeDimVals);
  return getDimParamForDimOp(shapeDimVals[index]);
}

// Look up the dim_param for dimension `index` of `tensorOrMemref`: first via
// the value's direct definition site, then, as a fallback, via the
// indexed-value (Concat-of-Dims) pattern. Returns "" when neither source
// provides information.
std::string getDimParamUtil(Value tensorOrMemref, int64_t index) {
  std::string result =
      getDimParamFromDirectDefiningOpUtil(tensorOrMemref, index);
  if (result.empty())
    result = getDimParamForIndexedValueUtil(tensorOrMemref, index);
  return result;
}

// Used for runtime dims; integer by default.
void IndexExprImpl::initAsQuestionmark(Value tensorOrMemref, int64_t index) {
// Each question mark is assigned a unique integer that is obtained
Expand All @@ -78,6 +183,25 @@ void IndexExprImpl::initAsQuestionmark(Value tensorOrMemref, int64_t index) {
init(/*isDefined*/ true, /*literal*/ false,
/*isLitFloat, as this is for shapes*/ false, IndexExprKind::Questionmark,
questionValue, AffineExpr(nullptr), Value(nullptr));

// Get the dimSymbol from the dim_params
// This symbol acts similar to questionValue, but predefined from onnx model
std::string dimSymbol = getDimParamUtil(tensorOrMemref, index);
if (dimSymbol != "")
dimParam = dimSymbol;
}

void IndexExprImpl::initAsQuestionmarkForIndexedValue(
    Value tensorOrMemref, int64_t index) {
  // Hash the (value, index) pair into a unique question-mark id, mirroring
  // the shape-based initAsQuestionmark above.
  llvm::hash_code questionValue = llvm::hash_combine(
      mlir::hash_value(tensorOrMemref), llvm::hash_value(index));
  init(/*isDefined*/ true, /*literal*/ false,
      /*isLitFloat, as this is for shapes*/ false, IndexExprKind::Questionmark,
      questionValue, AffineExpr(nullptr), Value(nullptr));
  // Propagate the onnx dim_param symbol for val[index], when one exists.
  std::string dimSymbol = getDimParamForIndexedValueUtil(tensorOrMemref, index);
  if (!dimSymbol.empty())
    dimParam = dimSymbol;
}

void IndexExprImpl::initAsLiteral(int64_t const val, const IndexExprKind kind) {
Expand Down Expand Up @@ -329,6 +453,11 @@ bool IndexExprImpl::hasValue() const {
return value != nullptr;
}

// A dim_param symbol is present only when one was recorded at init time.
bool IndexExprImpl::hasDimParam() const {
  assert(isDefined());
  return !dimParam.empty();
}

//===----------------------------------------------------------------------===//
// IndexExprExpr getters.
//===----------------------------------------------------------------------===//
Expand Down Expand Up @@ -508,6 +637,11 @@ Value IndexExprImpl::getValue() {
return value;
}

std::string IndexExprImpl::getDimParam() {
  // Should it be only for QuestionMark?
  // NOTE(review): currently returned for any kind; "" signals that no
  // dim_param was recorded — confirm whether non-Questionmark callers exist.
  return dimParam;
}

//===----------------------------------------------------------------------===//
// IndexExprExpr setters.
//===----------------------------------------------------------------------===//
Expand Down
8 changes: 8 additions & 0 deletions src/Dialect/Mlir/IndexExprDetail.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,11 @@ class IndexExprImpl {
// question mark is assigned to a unique value hashed from the given
// tensorOrMemref and dimension index.
void initAsQuestionmark(mlir::Value tensorOrMemref, int64_t index);
// Initialize a question mark for an indexed value, i.e., the value of
// val[index]. In general this value could be a constant. Here only the
// symbolic case is considered, and the dim_param symbol is propagated if it
// exists. In contrast, the function above initializes from the shape.
void initAsQuestionmarkForIndexedValue(mlir::Value val, int64_t index);
void initAsLiteral(int64_t const value, IndexExprKind const kind);
void initAsLiteral(double const value, IndexExprKind const kind);
void initAsKind(mlir::Value const value, IndexExprKind const kind);
Expand All @@ -69,6 +74,7 @@ class IndexExprImpl {
bool isInCurrentScope() const;
bool hasAffineExpr() const;
bool hasValue() const;
bool hasDimParam() const;

// Getters.
IndexExprScope &getScope() const;
Expand All @@ -83,6 +89,7 @@ class IndexExprImpl {
void getAffineMapAndOperands(
mlir::AffineMap &map, llvm::SmallVectorImpl<mlir::Value> &operands);
mlir::Value getValue();
std::string getDimParam();

// Setters.
void setLiteral(int64_t val);
Expand Down Expand Up @@ -122,6 +129,7 @@ class IndexExprImpl {
mlir::AffineExpr affineExpr;
// Value expression, may be defined whenever the IndexExpr is defined.
mlir::Value value;
std::string dimParam;
};

} // namespace onnx_mlir
Expand Down
55 changes: 54 additions & 1 deletion src/Dialect/ONNX/ONNXOps/ShapeHelper.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -67,6 +67,37 @@ int64_t getAxisInRange(int64_t axis, Value val, bool includeRank) {
// ONNX Op Shape Helper
//===----------------------------------------------------------------------===//

// Record the dim_param symbols of the inferred output dims on `op` as an
// attribute "onnx.out_dim_params_<resultIndex>", encoded as
// "<dim>:<param>,<dim>:<param>,...". Only question-mark dims that carry a
// dim_param are recorded; no attribute is set when there are none.
static void refineDimParams(
    Operation *op, DimsExpr &inferredDims, Value output) {
  // Get the index of output.
  if (!llvm::isa<OpResult>(output)) {
    // output is a block parameter. It could be Func, Loop, If and etc.
    // Give up now due to the complicated control flow.
    return;
  }
  unsigned resultIndex = llvm::cast<OpResult>(output).getResultNumber();
  std::string dimParamsStr;
  for (unsigned i = 0; i < inferredDims.size(); ++i) {
    if (inferredDims[i].isQuestionmark() && inferredDims[i].hasDimParam()) {
      // Emit the separator only between entries (replaces the isFirst flag).
      if (!dimParamsStr.empty())
        dimParamsStr.append(",");
      // append() avoids the temporaries created by chained operator+.
      dimParamsStr.append(std::to_string(i))
          .append(":")
          .append(inferredDims[i].getDimParam());
    }
  }
  if (dimParamsStr.empty())
    return;
  // setAttr takes the StringAttr directly; the previous extra
  // StringAttr(dimParamsAttr) wrap was redundant.
  op->setAttr(OP_DIM_PARAMS + std::to_string(resultIndex),
      StringAttr::get(op->getContext(), dimParamsStr));
}

/// Refine `inferredDims` using the output's shape if possible. For example,
/// replacing a dynamic dim in `inferredDims` by a static dim in the output's
/// shape.
Expand All @@ -85,7 +116,8 @@ static void refineDims(Operation *op, DimsExpr &inferredDims, Value output) {
"Inferred shape and existing shape are inconsistent in the number "
"of elements");

// Try to update inferredDim if existingDim is static.
refineDimParams(op, inferredDims, output);

for (unsigned i = 0; i < existingDims.size(); ++i) {
// Safety checks for old convention of using -1 for dynamic.
assert(existingDims[i] != -1 && "dynamic use kDynamic now");
Expand Down Expand Up @@ -377,6 +409,11 @@ LogicalResult ONNXBroadcastOpShapeHelper::customComputeShape(
continue;
}
// Case: QuestionMark - QuestionMark
if (currentDimExpr.hasDimParam() && nextDimExpr.hasDimParam() &&
currentDimExpr.getDimParam() == nextDimExpr.getDimParam()) {
// Same symbolic dim
continue;
}
if (!hasUniBroadcasting) {
dimsExpr[j] = IndexExpr::max(currentDimExpr, nextDimExpr);
}
Expand Down Expand Up @@ -404,6 +441,22 @@ bool ONNXBroadcastOpShapeHelper::hasNoBroadcast(DimAnalysis *dimAnalysis) {
// broadcasting for any reasons, hasNoBroadcast is set to false.
bool hasNoBroadcast = true;
for (uint64_t r = 0; r < outputRank && hasNoBroadcast; ++r) {
// Check with dim_param info: if all inputs of this dimension have the same
// dim_param, sameDyn will remain true, and no further check of this
// dimension is needed.
DimsExpr dimsInput0 = inputsDims[0];
if (dimsInput0[r].isQuestionmark() && dimsInput0[r].hasDimParam()) {
bool sameDyn = true;
for (uint64_t i = 1; i < inputsDims.size(); i++) {
DimsExpr dims = inputsDims[i];
if (!(dims[r].isQuestionmark() && dims[r].hasDimParam() &&
dimsInput0[r].getDimParam() == dims[r].getDimParam())) {
sameDyn = false;
}
}
if (sameDyn)
continue;
}
bool hasOne, hasOtherThanOne;
hasOne = hasOtherThanOne = false;
for (DimsExpr dims : inputsDims) {
Expand Down
4 changes: 4 additions & 0 deletions src/Dialect/ONNX/ONNXOps/ShapeHelper.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,10 @@

namespace onnx_mlir {

// Define the attribute name for onnx.dim_param and its propagation
const std::string FUNC_DIM_PARAMS("onnx.dim_params");
const std::string OP_DIM_PARAMS("onnx.out_dim_params_");

//===----------------------------------------------------------------------===//
// Support functions.
//===----------------------------------------------------------------------===//
Expand Down
11 changes: 9 additions & 2 deletions src/Dialect/ONNX/ONNXOps/Tensor/Reshape.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -159,8 +159,15 @@ LogicalResult ONNXReshapeOpShapeHelper::computeShape() {
for (unsigned i = 0; i < outputRank; ++i) {
if (hasShapeAndRank(data)) {
IndexExpr dimShape = createIE->getIntFromArrayAsSymbol(shape, i);
outputDims[i] = outputDims[i].selectOrSelf(
dimShape == -1, numOfElements.floorDiv(numOfElementsFromShape));
if (auto search = outputIgnoredDims.find(i);
search != outputIgnoredDims.end())
// The outputIgnoredDims are dims whose symbolic value matches a dim in
// data. Therefore, they cannot be -1. The current folding of IndexExpr
// cannot propagate the dim_param info.
outputDims[i] = dimShape;
else
outputDims[i] = outputDims[i].selectOrSelf(
dimShape == -1, numOfElements.floorDiv(numOfElementsFromShape));
} else {
// ToFix: can not check getAllowzero because the operandAdaptor is
// constructed without attributes
Expand Down
Loading
Loading