Merge pull request #2986 from alibaba/feature/bugfix
Feature/bugfix
jxt1234 authored Aug 8, 2024
2 parents e6042e5 + 8839467 commit b11b703
Showing 8 changed files with 335 additions and 5 deletions.
2 changes: 1 addition & 1 deletion CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required(VERSION 3.0)
+cmake_minimum_required(VERSION 3.6)
# Versioning stuff
file(STRINGS "${CMAKE_CURRENT_LIST_DIR}/include/MNN/MNNDefine.h" MNN_DEFINE)
string(REGEX MATCH "MNN_VERSION_MAJOR [0-9]+" MNN_VERSION_MAJOR_DEFINE ${MNN_DEFINE})
3 changes: 1 addition & 2 deletions source/backend/metal/MetalLoop.mm
@@ -215,7 +215,6 @@ virtual ErrorCode onResize(const std::vector<Tensor *>& inputs, const std::vecto
auto AStride = cmd->view()->GetAs<View>(1)->stride()->data();
auto BStride = cmd->view()->GetAs<View>(2)->stride()->data();
auto OStride = cmd->view()->GetAs<View>(0)->stride()->data();
-int totalSize = mLoop->loopNumber() * size[0] * size[1] * size[2];
auto param = reinterpret_cast<VulkanBatchMatMulInfo*>([mParam contents]);
param->size[3] = mLoop->loopNumber();
for (int i=0; i<3; ++i) {
@@ -240,7 +239,7 @@ virtual void onEncode(const std::vector<Tensor *> &inputs, const std::vector<Ten
auto AStride = cmd->view()->GetAs<View>(1)->stride()->data();
auto BStride = cmd->view()->GetAs<View>(2)->stride()->data();
auto OStride = cmd->view()->GetAs<View>(0)->stride()->data();
-int totalSize = mLoop->loopNumber() * size[0] * size[1] * size[2];
+size_t totalSize = mLoop->loopNumber() * size[0] * size[2];
[encoder setComputePipelineState:mPipeline];
for (int i=0; i<cmd->indexes()->size(); ++i) {
MetalBackend::setTensor(mTensors[cmd->indexes()->data()[i]], encoder, i);
2 changes: 1 addition & 1 deletion source/backend/vulkan/buffer/execution/VulkanLoop.cpp
@@ -62,7 +62,7 @@ class VulkanBatchMatMul : public VulkanBasicExecution {
auto AStride = cmd->view()->GetAs<View>(1)->stride()->data();
auto BStride = cmd->view()->GetAs<View>(2)->stride()->data();
auto OStride = cmd->view()->GetAs<View>(0)->stride()->data();
-int totalSize = mLoop->loopNumber() * size[0] * size[1] * size[2];
+int totalSize = mLoop->loopNumber() * size[0] * size[2];
auto param = reinterpret_cast<VulkanBatchMatMulInfo*>(mParam->map());
param->size[3] = mLoop->loopNumber();
auto vkBn = static_cast<VulkanBackend*>(backend());
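The Metal and Vulkan hunks above apply the same fix: the work-item count for the loop's batched matmul previously multiplied in size[1] as well, over-counting by the reduction length. A minimal sketch of the corrected count, under the assumption (suggested by the removal of size[1]) that size holds {e, l, h} for C[e, h] = A[e, l] * B[l, h]; this is illustrative C++, not MNN source:

#include <cstddef>

// Only the e * h output elements need a work item each; the reduction
// axis l (size[1]) is walked inside the kernel, not across threads.
// size_t matches the Metal hunk, which also widens the type to avoid
// 32-bit overflow for large loop counts.
inline size_t loopMatMulWorkItems(int loopNumber, const int size[3]) {
    return static_cast<size_t>(loopNumber)
         * static_cast<size_t>(size[0])   // e: output rows
         * static_cast<size_t>(size[2]);  // h: output columns
}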
14 changes: 14 additions & 0 deletions source/core/TensorUtils.cpp
@@ -538,10 +538,24 @@ static bool _ClipDst(int* stride, int srcOffset, int dstOffset, const int* srcSi
if (0 != srcOffset || 0 != dstOffset) {
return false;
}
+int srcMax = 0;
for (int i=0; i<sizeNum; ++i) {
+srcMax += srcSize[i] * stride[i];
dstMin[i] = ALIMAX(0, -o[i]);
dstMax[i] = ALIMIN(srcSize[i]-o[i], dstSize[i]);
}
+// Check if dstMax lies inside src: if so, a single region can't describe dst - src
+// TODO: Support slicing the region to enable this fuse
+for (int i=0; i<sizeNum; ++i) {
+if (dstMax[i] == dstSize[i]) {
+continue;
+}
+int bias = offsetBias + dstMax[i] * stride[i];
+if (bias < srcMax) {
+// for indices in [dstMax, dstSize), some addresses still fall inside src
+return false;
+}
+}
return true;
}
static bool _RegionValid(int* stride, int offset, int* size, int sizeNum, size_t limitSize) {
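The new check in _ClipDst rejects region fusion when dst is only partially covered: after clipping dst against src on each axis, if some axis is truncated (dstMax[i] < dstSize[i]) yet the first address past the clipped range still falls inside the source's linear extent, no single region can describe dst - src. This is exactly why the RegionFuse test below now expects the pad + transpose + slice + transpose case to report no fusable region (all -1). A simplified standalone sketch of the rule, assuming zero offsets (o[i] = 0, offsetBias = 0, both defined outside this hunk); illustrative only, not the MNN implementation:

#include <algorithm>

bool singleRegionCanDescribe(const int* stride, const int* srcSize,
                             const int* dstSize, int sizeNum) {
    // Linear extent covered by the source region.
    int srcMax = 0;
    for (int i = 0; i < sizeNum; ++i) {
        srcMax += srcSize[i] * stride[i];
    }
    for (int i = 0; i < sizeNum; ++i) {
        int dstMax = std::min(srcSize[i], dstSize[i]); // clip dst to src (zero offset)
        if (dstMax == dstSize[i]) {
            continue; // axis fully covered: nothing of dst lies outside the clip
        }
        // First address beyond the clipped range on axis i. If it still lands
        // inside the source extent, describing dst - src needs more than one
        // region, so the fuse must be rejected.
        if (dstMax * stride[i] < srcMax) {
            return false;
        }
    }
    return true;
}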
2 changes: 1 addition & 1 deletion test/core/RegionFuse.cpp
@@ -63,7 +63,7 @@ class RegionFuseTest : public MNNTestCase {
// pad + transpose + slice + transpose (not full copy)
{0, 12321, 111, 1, 0, 12544, 112, 1, 32, 111, 111},
{113, 12544, 112, 1, 0, 12321, 111, 1, 32, 111, 111},
-{112, 12321, 111, 1, 0, 12321, 111, 1, 32, 110, 110}
+{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
};
TensorUtils::FuseWrap fuseUtils;
for (int i = 0; i < N; i++) {
48 changes: 48 additions & 0 deletions test/op/SplitTest.cpp
@@ -51,3 +51,51 @@
}
};
MNNTestSuiteRegister(SplitTest, "op/split");

class SliceTest : public MNNTestCase {
public:
virtual ~SliceTest() = default;
virtual bool run(int precision) {
auto input = _Input({2, 9, 4}, NCHW, halide_type_of<int>());
input->setName("input");
// set input data
auto size = input->getInfo()->size;
auto iptr = input->writeMap<int>();
for (int i=0; i<size; ++i) {
int ci = i % 4;
int co = i / 36;
int area = (i / 4) % 9;
iptr[i] = (ci+co*4) * 10 + area;
}
input->unMap();
auto inputTran = _Reshape(_Transpose(input, {0, 2, 1}), {8, 9}, NCHW);
std::vector<int> startDims = {1, 0};
std::vector<int> sizeDims = {4, 9};
auto start = _Const(startDims.data(), {2}, NCHW, halide_type_of<int>());
auto sizeVar = _Const(sizeDims.data(), {2}, NCHW, halide_type_of<int>());

auto output = _Slice(inputTran, start, sizeVar);
auto oinfo = output->getInfo();
if (oinfo->dim.size() != 2) {
FUNC_PRINT(1);
return false;
}
if (oinfo->dim[1] != 9 || oinfo->dim[0] != 4) {
FUNC_PRINT(1);
return false;
}
auto optr = output->readMap<int>();
for (int i=0; i<4; ++i) {
for (int j=0; j<9; ++j) {
int expect = (i+1)*10+j;
int compute = optr[i*9+j];
if (expect != compute) {
MNN_ERROR("Error for i=%d - j=%d, %d:%d\n", i, j, expect, compute);
return false;
}
}
}
return true;
}
};
MNNTestSuiteRegister(SliceTest, "op/slice");
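As a cross-check of the expected values above: the fill gives input(co, area, ci) = (ci + co*4)*10 + area; transposing with {0, 2, 1} and reshaping to [8, 9] puts value r*10 + j at row r = co*4 + ci, column j, so slicing 4 rows starting at row 1 must yield (i+1)*10 + j. A plain-C++ sketch with no MNN dependency that replays the same arithmetic:

#include <cassert>

int main() {
    // Fill [2, 9, 4] exactly as the test does: iptr[i] = (ci + co*4)*10 + area.
    int in[72];
    for (int i = 0; i < 72; ++i) {
        int ci = i % 4, area = (i / 4) % 9, co = i / 36;
        in[i] = (ci + co * 4) * 10 + area;
    }
    // Transpose {0, 2, 1}: [2, 9, 4] -> [2, 4, 9], then view as [8, 9].
    int flat[8][9];
    for (int co = 0; co < 2; ++co)
        for (int ci = 0; ci < 4; ++ci)
            for (int j = 0; j < 9; ++j)
                flat[co * 4 + ci][j] = in[co * 36 + j * 4 + ci];
    // Slice start {1, 0}, size {4, 9}: rows 1..4 hold (i+1)*10 + j.
    for (int i = 0; i < 4; ++i)
        for (int j = 0; j < 9; ++j)
            assert(flat[i + 1][j] == (i + 1) * 10 + j);
    return 0;
}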
3 changes: 3 additions & 0 deletions tools/cpp/CMakeLists.txt
@@ -16,6 +16,9 @@ list(APPEND MNN_CPP_TOOLS SequenceModuleTest.out)
add_executable(mergeInplaceForCPU ${CMAKE_CURRENT_LIST_DIR}/mergeInplaceForCPU.cpp)
list(APPEND MNN_CPP_TOOLS mergeInplaceForCPU)

+add_executable(modelCompare.out ${CMAKE_CURRENT_LIST_DIR}/modelCompare.cpp)
+list(APPEND MNN_CPP_TOOLS modelCompare.out)

add_executable(MNNV2Basic.out ${CMAKE_CURRENT_LIST_DIR}/MNNV2Basic.cpp)
list(APPEND MNN_CPP_TOOLS MNNV2Basic.out)
if (MNN_USE_SSE)
266 changes: 266 additions & 0 deletions tools/cpp/modelCompare.cpp
@@ -0,0 +1,266 @@
//
// modelCompare.cpp
// MNN
//
// Created by MNN on 2019/01/22.
// Copyright © 2018, Alibaba Group Holding Limited
//

#define MNN_OPEN_TIME_TRACE

#include <math.h>
#include <stdlib.h>
#include <algorithm>
#include <cstring>
#include <fstream>
#include <iostream>
#include <map>
#include <sstream>
#include <string>
#include <MNN/AutoTime.hpp>
#include <MNN/Interpreter.hpp>
#include <MNN/Tensor.hpp>
#include "core/TensorUtils.hpp"
#include "rapidjson/document.h"

template<typename T>
inline T stringConvert(const char* number) {
std::istringstream os(number);
T v;
os >> v;
return v;
}

using namespace MNN;

static void compareNet(Interpreter* net, Interpreter* net2, MNNForwardType expectType, float tolerance,
const std::map<std::string, std::shared_ptr<Tensor>>& inputs, const std::string& stopOp, BackendConfig::PrecisionMode precision, int modeNum) {
std::vector<std::shared_ptr<MNN::Tensor>> correctResult;
int index = 0; // read position in correctResult during comparison
MNN::ScheduleConfig expectConfig;
BackendConfig backendConfig;
backendConfig.precision = precision;
expectConfig.type = expectType;
expectConfig.backendConfig = &backendConfig;
expectConfig.mode = modeNum;
auto expectSession = net->createSession(expectConfig);
auto compareSession = net2->createSession(expectConfig);

bool allCorrect = true;

MNN::TensorCallBackWithInfo beginCallBack = [&](const std::vector<MNN::Tensor*>& t, const OperatorInfo* op) {
if (op->name() == stopOp) {
return false;
}
return true;
};
MNN::TensorCallBackWithInfo saveExpect = [&](const std::vector<MNN::Tensor*>& t, const OperatorInfo* op) {
if (op->name() == stopOp) {
return false;
}
if (op->type() == "Raster") {
return true;
}
for (int i=0; i<t.size(); ++i) {
auto tensor = t[i];
if (tensor->elementSize() <= 0) {
return true;
}
if (tensor->buffer().device == 0 && tensor->buffer().host == nullptr) {
return true;
}
std::shared_ptr<MNN::Tensor> copyTensor(MNN::Tensor::createHostTensorFromDevice(tensor, true));
correctResult.emplace_back(copyTensor);
}
return true;
};
MNN::TensorCallBackWithInfo compareExpect = [&](const std::vector<MNN::Tensor*>& t, const OperatorInfo* op) {
if (op->name() == stopOp) {
return false;
}
if (op->type() == "Raster") {
return true;
}
for (int i=0; i<t.size(); ++i) {
auto tensor = t[i];
if (tensor->elementSize() <= 0) {
return true;
}
if (tensor->buffer().device == 0 && tensor->buffer().host == nullptr) {
return true;
}
std::shared_ptr<MNN::Tensor> copyTensor(MNN::Tensor::createHostTensorFromDevice(tensor, true));
auto expectTensor = correctResult[index++];
auto correct = TensorUtils::compareTensors(copyTensor.get(), expectTensor.get(), tolerance, true);
if (!correct) {
MNN_PRINT("%s - %d is error\n", op->name().c_str(), i);
allCorrect = false;
}
}
return allCorrect;
};

for (auto& iter : inputs) {
Tensor* expectInput = net->getSessionInput(expectSession, iter.first.empty() ? NULL : iter.first.c_str());
expectInput->copyFromHostTensor(iter.second.get());
Tensor* compareInput = net2->getSessionInput(compareSession, iter.first.empty() ? NULL : iter.first.c_str());
compareInput->copyFromHostTensor(iter.second.get());
}
correctResult.clear();
net->runSessionWithCallBackInfo(expectSession, beginCallBack, saveExpect);
index = 0;
net2->runSessionWithCallBackInfo(compareSession, beginCallBack, compareExpect);
if (allCorrect) {
MNN_PRINT("Correct ! Run second pass\n");
} else {
return;
}
index = 0;
for (auto& iter : inputs) {
Tensor* compareInput = net2->getSessionInput(compareSession, iter.first.empty() ? NULL : iter.first.c_str());
compareInput->copyFromHostTensor(iter.second.get());
}
net2->runSessionWithCallBackInfo(compareSession, beginCallBack, compareExpect);
if (allCorrect) {
MNN_PRINT("Correct !\n");
}
}

int main(int argc, const char* argv[]) {
if (argc < 3) {
MNN_PRINT("Usage: ./modelCompare.out origin.mnn origin_quant.mnn [0.05]\n");
return 0;
}
// read args
std::string cmd = argv[0];
std::string pwd = "./";
auto rslash = cmd.rfind("/");
if (rslash != std::string::npos) {
pwd = cmd.substr(0, rslash + 1);
}

const char* fileName = argv[1];

const char* compareFileName = argv[2];

float tolerance = 0.05f;
if (argc > 3) {
tolerance = stringConvert<float>(argv[3]);
}
MNN_PRINT("Tolerance Rate: %f\n", tolerance);

// create net
MNN_PRINT("Open Model %s, %s\n", fileName, compareFileName);
std::shared_ptr<MNN::Interpreter> net =
std::shared_ptr<MNN::Interpreter>(MNN::Interpreter::createFromFile(fileName));
net->setSessionMode(Interpreter::Session_Debug);
std::shared_ptr<MNN::Interpreter> net2 =
std::shared_ptr<MNN::Interpreter>(MNN::Interpreter::createFromFile(compareFileName));
net2->setSessionMode(Interpreter::Session_Debug);

// create session for get input info
ScheduleConfig config;
config.type = MNN_FORWARD_CPU;
auto session = net->createSession(config);

std::map<std::string, std::shared_ptr<MNN::Tensor>> inputs;
std::vector<std::string> inputNames;
do {
rapidjson::Document document;
std::ostringstream jsonNameOs;
jsonNameOs << pwd << "/input.json";
std::ifstream fileNames(jsonNameOs.str().c_str());
if (fileNames.fail()) {
break;
}
std::ostringstream output;
output << fileNames.rdbuf();
auto outputStr = output.str();
document.Parse(outputStr.c_str());
if (document.HasParseError()) {
MNN_ERROR("Invalid json\n");
break;
}
if (document.HasMember("inputs")) {
auto inputsInfo = document["inputs"].GetArray();
for (auto iter = inputsInfo.begin(); iter !=inputsInfo.end(); iter++) {
auto obj = iter->GetObject();
std::string name = obj["name"].GetString();
inputNames.emplace_back(name);
}
}
} while (false);
if (!inputNames.empty()) {
MNN_PRINT("Find input.json, use inputs:");
for (auto& n : inputNames) {
MNN_PRINT(" %s, ", n.c_str());
}
MNN_PRINT("\n");
for (auto name : inputNames) {
auto inputTensor = net->getSessionInput(session, name.c_str());
std::shared_ptr<MNN::Tensor> givenTensor(new Tensor(inputTensor, inputTensor->getDimensionType()));
{
std::ostringstream fileName;
fileName << pwd << name << ".txt";
std::ifstream input(fileName.str().c_str());
MNN_ASSERT(!input.fail());

int size_w = inputTensor->width();
int size_h = inputTensor->height();
int bpp = inputTensor->channel();
int batch = inputTensor->batch();
// auto backend = net->getBackend(session, inputTensor);
// MNN_ASSERT(!input.fail());
MNN_PRINT("Input: %d,%d,%d,%d\n", size_w, size_h, bpp, batch);
auto inputData = givenTensor->host<float>();
auto size = givenTensor->size() / sizeof(float);
for (int i = 0; i < size; ++i) {
input >> inputData[i];
}
inputs.insert(std::make_pair(name, givenTensor));
}

}
} else {
auto inputTensor = net->getSessionInput(session, NULL);
std::shared_ptr<MNN::Tensor> givenTensor(new Tensor(inputTensor, inputTensor->getDimensionType()));
{
std::ostringstream fileName;
fileName << pwd << "input_0"
<< ".txt";
std::ifstream input(fileName.str().c_str());

int size_w = inputTensor->width();
int size_h = inputTensor->height();
int bpp = inputTensor->channel();
int batch = inputTensor->batch();
// auto backend = net->getBackend(session, inputTensor);
// MNN_ASSERT(!input.fail());
MNN_PRINT("Input: %d,%d,%d,%d\n", size_w, size_h, bpp, batch);
auto inputData = givenTensor->host<float>();
auto size = givenTensor->size() / sizeof(float);
for (int i = 0; i < size; ++i) {
input >> inputData[i];
}
inputs.insert(std::make_pair("", givenTensor));
}
}
net->releaseSession(session);
BackendConfig::PrecisionMode precision = BackendConfig::Precision_Normal;
if (argc > 4) {
precision = (BackendConfig::PrecisionMode)atoi(argv[4]);
}
FUNC_PRINT(precision);
int modeNum = 1;
if (argc > 5) {
modeNum = atoi(argv[5]); // set GPU mode
}
FUNC_PRINT(modeNum);
std::string stopOp = "";
if (argc > 6) {
stopOp = argv[6];
}
FUNC_PRINT_ALL(stopOp.c_str(), s);
compareNet(net.get(), net2.get(), MNN_FORWARD_CPU, tolerance, inputs, stopOp, precision, modeNum);

return 0;
}
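Usage note, inferred from the argument parsing above rather than from separate documentation: run as ./modelCompare.out origin.mnn origin_quant.mnn [tolerance] [precision] [mode] [stopOp], where tolerance defaults to 0.05, precision is cast to BackendConfig::PrecisionMode, mode is passed to ScheduleConfig::mode, and stopOp names an op at which the comparison callbacks stop. Input data is read from whitespace-separated-float text files next to the binary: if an input.json of the shape {"inputs": [{"name": "..."}]} is present, each listed name is read from <name>.txt; otherwise the tool falls back to input_0.txt for the model's sole unnamed input.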
