Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions tmva/sofie/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,7 @@ ROOT_STANDARD_LIBRARY_PACKAGE(ROOTTMVASofie
TMVA/ROperator_Einsum.hxx
TMVA/ROperator_Random.hxx
TMVA/ROperator_ScatterElements.hxx
TMVA/ROperator_ScatterND.hxx
TMVA/ROperator_Gather.hxx
TMVA/ROperator_GatherND.hxx
TMVA/ROperator_NonZero.hxx
Expand Down
59 changes: 55 additions & 4 deletions tmva/sofie/inc/TMVA/ROperator_BasicBinary.hxx
Original file line number Diff line number Diff line change
Expand Up @@ -140,6 +140,7 @@ public:
auto ret = UTILITY::MultidirectionalBroadcastShape(fShapeA, fShapeB);
fBroadcastFlag = ret.first;
fShapeY = ret.second;
auto lengthY = ConvertShapeToLength(fShapeY);
if (model.IsConstantTensor(fNA) && model.IsConstantTensor(fNB)) {
bool broadcast = fBroadcastFlag > 0;
if (broadcast) {
Expand Down Expand Up @@ -193,7 +194,7 @@ public:
const std::string &nameB = fNBroadcastedB.empty() ? fNB : fNBroadcastedB;
auto dataA = static_cast<T *>(model.GetInitializedTensorData(nameA).get());
auto dataB = static_cast<T *>(model.GetInitializedTensorData(nameB).get());
std::vector<T> dataY(ConvertShapeToLength(fShapeY));
std::vector<T> dataY(lengthY);
for (size_t i = 0; i < dataY.size(); i++) {
dataY[i] = BinaryOperatorTrait<T, Op>::Func(dataA[i], dataB[i]);
}
Expand All @@ -207,6 +208,59 @@ public:
<< " , " << fNB << " " << ConvertShapeToString(fShapeB) << " ---> " << fNY << " "
<< ConvertShapeToString(fShapeY) << " : " << ConvertValuesToString(dataY) << std::endl;
}
} else if (((model.IsShapeTensor(fNA) && model.IsShapeTensor(fNB)) ||
(model.IsShapeTensor(fNA) && model.IsConstantTensor(fNB)) ||
(model.IsShapeTensor(fNB) && model.IsConstantTensor(fNA)))
&& (fShapeA.size() <=1 && fShapeB.size() <=1 && model.GetTensorType(fNA) == ETensorType::INT64)) {
// case of shape tensors ( tensors are of rank 0 or 1 )
std::vector<Dim> dimValA;
std::vector<Dim> dimValB;
if (model.IsShapeTensor(fNA))
dimValA = model.GetShapeTensorValues(fNA);
if (model.IsShapeTensor(fNB))
dimValB = model.GetShapeTensorValues(fNB);
// adjust for broadcasting - repeat values until they reach the shape of Y
if (!fShapeY.empty() && fShapeY[0] > 1) {
if (dimValA.size() == 1) dimValA = std::vector<Dim>( fShapeY[0], dimValA[0]);
if (dimValB.size() == 1) dimValB = std::vector<Dim>( fShapeY[0], dimValB[0]);
}

auto convertDataToDim = [&](const std::string & name, const std::vector<size_t> & shape, std::vector<Dim> & dimValues) {
auto data = static_cast<int64_t *>(model.GetInitializedTensorData(name).get());
dimValues.resize(lengthY);
for (size_t i = 0; i < lengthY; i++) {
if (!shape.empty() && lengthY == shape[0])
dimValues[i] = Dim{ static_cast<size_t>(data[i])};
else // case dataA is a scalar
dimValues[i] = Dim{ static_cast<size_t>(data[0])};
}
};
if (model.IsConstantTensor(fNA)) {
convertDataToDim(fNA,fShapeA,dimValA);
} else if (model.IsConstantTensor(fNB)) {
convertDataToDim(fNB,fShapeB,dimValB);
}

//perform binary operations on shape tensors
std::vector<Dim> dimValY(lengthY);
for (size_t i = 0; i < lengthY; i++) {
if (!dimValA[i].isParam && !dimValB[i].isParam) {
size_t d = BinaryOperatorTrait<size_t, Op>::Func(dimValA[i].dim, dimValB[i].dim);
dimValY[i] = Dim{d};
} else {
auto res = BinaryOperatorTrait<T, Op>::Op(dimValA[i].GetVal(), dimValB[i].GetVal());
dimValY[i] = Dim{res, static_cast<size_t>(-1)};
}
}
model.AddShapeTensor(fNY,dimValY, fShapeY.empty()); // cannot be a scalar
if (model.Verbose()) {
std::cout << BinaryOperatorTrait<T, Op>::Name() << " : " << fNA << " " << ConvertShapeToString(fShapeA)
<< " , " << fNB << " " << ConvertShapeToString(fShapeB) << " ---> " << fNY << " "
<< ConvertShapeToString(fShapeY) << " : " << ConvertDimShapeToString(dimValY) << " (shape)" << std::endl;
}
// no code needs to be generated (flag this as a constant output tensor)
fIsOutputConstant = true;

} else {
// case of defined and non-constant tensors
model.AddIntermediateTensor(fNY, model.GetTensorType(fNA), fShapeY);
Expand Down Expand Up @@ -279,9 +333,6 @@ public:

opName = "op_" + opName;

if (fDimShapeY.empty()) {
throw std::runtime_error("TMVA SOFIE Binary Op called to Generate without being initialized first");
}
std::stringstream out;
out << SP << "\n//------ " << opName << " " << BinaryOperatorTrait<T, Op>::Name() << " --> "
<< ConvertDimShapeToString(fDimShapeY) << "\n";
Expand Down
19 changes: 10 additions & 9 deletions tmva/sofie/inc/TMVA/ROperator_Cast.hxx
Original file line number Diff line number Diff line change
Expand Up @@ -20,13 +20,14 @@ private:
std::string fNX;
std::string fNY;
std::vector<Dim> fShape;
std::string fAttrType = "float";
ETensorType fType;

public:
ROperator_Cast(){}
ROperator_Cast(std::string attr_type,std::string nameX, std::string nameY):
fNX(UTILITY::Clean_name(nameX)), fNY(UTILITY::Clean_name(nameY)),
fAttrType(attr_type) {
ROperator_Cast(ETensorType type,std::string nameX, std::string nameY):
fNX(UTILITY::Clean_name(nameX)), fNY(UTILITY::Clean_name(nameY)),
fType(type)
{
fInputTensorNames = { fNX };
fOutputTensorNames = { fNY };
}
Expand All @@ -51,21 +52,21 @@ public:
if (model.IsInitializedTensor(fNX)) {
fIsOutputConstant = true;
auto inputData = model.GetInitializedTensorData(fNX);
if (ConvertStringToType(fAttrType) == ETensorType::INT64) {
if (fType == ETensorType::INT64) {
model.AddConstantTensor<int64_t>(fNY, ConvertShapeToInt(fShape), static_cast<int64_t*>(inputData.get()));
model.SetNotWritableInitializedTensor(fNX);
}
else
fIsOutputConstant = false;
} else if (model.IsShapeTensor(fNX) && ConvertStringToType(fAttrType) == ETensorType::INT64) {
} else if (model.IsShapeTensor(fNX) && fType == ETensorType::INT64) {
auto shapeData = model.GetShapeTensorValues(fNX);
model.AddShapeTensor(fNY, shapeData, fShape.size() == 0);
fIsOutputConstant = true;
}
if (!fIsOutputConstant)
model.AddIntermediateTensor(fNY, ConvertStringToType(fAttrType), fShape);
model.AddIntermediateTensor(fNY, fType, fShape);
if (model.Verbose()) {
std::cout << "Cast : " << ConvertTypeToString(inputType) << " " << fNX << " -> " << fAttrType << " for " << fNY
std::cout << "Cast : " << ConvertTypeToString(inputType) << " " << fNX << " -> " << ConvertTypeToString(fType) << " for " << fNY
<< " shape " << ConvertDimShapeToString(fShape);
if (fIsOutputConstant) std::cout << " (constant) ";
std::cout << std::endl;
Expand All @@ -86,7 +87,7 @@ public:

out << SP << "for (int id = 0; id < " << length << " ; id++){\n";

out << SP << SP << "tensor_" << fNY << "[id] = static_cast<"<< fAttrType << ">(tensor_" << fNX << "[id]);\n";
out << SP << SP << "tensor_" << fNY << "[id] = static_cast<"<< ConvertTypeToString(fType) << ">(tensor_" << fNX << "[id]);\n";

out << SP << "}\n";
return out.str();
Expand Down
20 changes: 14 additions & 6 deletions tmva/sofie/inc/TMVA/ROperator_Gemm.hxx
Original file line number Diff line number Diff line change
Expand Up @@ -165,7 +165,7 @@ namespace SOFIE{
}
if (fNC != ""){
if (model.CheckIfTensorAlreadyExist(fNC) == false){ //input must be a graph input, or already initialized intermediate tensor
throw std::runtime_error("TMVA SOFIE Gemm Op Input Tensor" + fNC + " is not found in model");
throw std::runtime_error("TMVA SOFIE Gemm Op Input Tensor " + fNC + " is not found in model");
}
}
if (model.IsDynamicTensor(fNA) || model.IsDimInputTensor(fNA) ) {
Expand Down Expand Up @@ -222,7 +222,7 @@ namespace SOFIE{
if (fIsDynamic && shapeY.empty())
broadcast_needed = true;
else
// consider broadcasting also if same length
// consider broadcasting also if they have different lengths
broadcast_needed = (fShapeC != shapeY);


Expand Down Expand Up @@ -285,7 +285,12 @@ namespace SOFIE{
int64_t dimA = fShapeA.size();
int64_t dimB = fShapeB.size();
int64_t dimY = fShapeY.size();
if (dimA != dimB || dimA != dimY) {
int64_t dimC = fShapeC.size();
if (dimA != dimB || dimA != dimY || (fBroadcastBias && dimC != dimY)) {
std::cout << " shape A " << ConvertDimShapeToString(fShapeA)
<< " shape B " << ConvertDimShapeToString(fShapeB)
<< " shape C " << ConvertShapeToString(fShapeC)
<< " shape Y " << ConvertDimShapeToString(fShapeY) << std::endl;
throw std::runtime_error("TMVA SOFIE Gemm(MatMul) has invalid shape for inputs or output");
}
auto m = (fAttrTransA ? fShapeA[dimA-1].GetVal() : fShapeA[dimA-2].GetVal());
Expand Down Expand Up @@ -357,6 +362,9 @@ namespace SOFIE{
}
// do the bias broadcasting
if (fBroadcastBias) {
// also shapeC has prepended 1 to be same rank of Y
std::vector<size_t> sC = {fShapeC[dimC-2], fShapeC[dimC-1]};

fAttrBeta = 1.;
out << SP << "for (size_t j = 0; j < " << sY[0] << "; j++) { \n";
out << SP << SP << "size_t y_index = ";
Expand All @@ -369,11 +377,11 @@ namespace SOFIE{

out << SP << SP << "for (size_t k = 0; k < " << sY[1] << "; k++) { \n";
std::string bias_index;
if (fShapeC[0] == 1 && fShapeC[1] == sY[1].dim)
if (sC[0] == 1 && sC[1] == sY[1].dim)
bias_index = "k";
else if (fShapeC[1] == 1 && fShapeC[0] == sY[0].dim)
else if (sC[1] == 1 && sC[0] == sY[0].dim)
bias_index = "j";
else if (fShapeC[0] == 1 && fShapeC[1] == 1) // scalar case
else if (sC[0] == 1 && sC[1] == 1) // scalar case
bias_index = "0";
else {
throw std::runtime_error("TMVA SOFIE Gemm Op - invalid shape for bias tensor " + ConvertShapeToString(fShapeC));
Expand Down
7 changes: 2 additions & 5 deletions tmva/sofie/inc/TMVA/ROperator_Reduce.hxx
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ namespace SOFIE{

enum EReduceOpMode { ReduceMean, ReduceSum, ReduceSumSquare, ReduceProd, InvalidReduceOp };

template <typename T, EReduceOpMode Op>
template <EReduceOpMode Op>
class ROperator_Reduce final : public ROperator
{
private:
Expand Down Expand Up @@ -76,7 +76,7 @@ public:
std::sort(ax.begin(), ax.end());
for (size_t j = 0; j < ax.size(); j++) {
// erase reduced dimensions, but keep last one
if (outputShape.size() > 1) {
if (outputShape.size() > 0) {
outputShape.erase(outputShape.begin() + ax[j]);
for (size_t k = j+1; k < ax.size(); k++)
ax[k] -= 1; // decrease by one since we have removed a value
Expand Down Expand Up @@ -120,9 +120,6 @@ public:

std::string Generate(std::string opName) override {
opName = "op_" + opName;
if (fShapeX.empty() || fShapeY.empty()) {
throw std::runtime_error("TMVA SOFIE Reduce Op called to Generate without being initialized first");
}

auto inputLength = TMVA::Experimental::SOFIE::ConvertDimShapeToLength(fShapeX);
auto outputLength = TMVA::Experimental::SOFIE::ConvertDimShapeToLength(fShapeY);
Expand Down
Loading
Loading