// paddlex.cpp
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <math.h>
#include <omp.h>

#include <algorithm>
#include <fstream>
#include <cstring>

#include "include/paddlex/paddlex.h"

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

namespace PaddleX {

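// Builds the Paddle inference predictor from a model exported by PaddleX:
// loads __model__/__params__ (or their .encrypted variants when compiled
// with WITH_ENCRYPTION and given a non-empty key) plus model.yml from
// model_dir, then configures CPU/GPU, MKL-DNN, TensorRT, and IR/memory
// optimizations.
//
// Minimal usage sketch (assumes the default arguments declared in
// include/paddlex/paddlex.h and a hypothetical model path):
//
//   PaddleX::Model model;
//   model.create_predictor("/path/to/inference_model");
//   PaddleX::ClsResult result;
//   model.predict(cv::imread("test.jpg"), &result);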
void Model::create_predictor(const std::string& model_dir,
                             bool use_gpu,
                             bool use_trt,
                             bool use_mkl,
                             int mkl_thread_num,
                             int gpu_id,
                             std::string key,
                             bool use_ir_optim) {
  paddle::AnalysisConfig config;
  std::string model_file = model_dir + OS_PATH_SEP + "__model__";
  std::string params_file = model_dir + OS_PATH_SEP + "__params__";
  std::string yaml_file = model_dir + OS_PATH_SEP + "model.yml";
  std::string yaml_input = "";
#ifdef WITH_ENCRYPTION
  if (key != "") {
    model_file = model_dir + OS_PATH_SEP + "__model__.encrypted";
    params_file = model_dir + OS_PATH_SEP + "__params__.encrypted";
    yaml_file = model_dir + OS_PATH_SEP + "model.yml.encrypted";
    paddle_security_load_model(
        &config, key.c_str(), model_file.c_str(), params_file.c_str());
    yaml_input = decrypt_file(yaml_file.c_str(), key.c_str());
  }
#endif
  if (yaml_input == "") {
    // Read the whole yaml file into a string.
    std::ifstream yaml_fin(yaml_file);
    yaml_fin.seekg(0, std::ios::end);
    size_t yaml_file_size = yaml_fin.tellg();
    yaml_input.assign(yaml_file_size, ' ');
    yaml_fin.seekg(0);
    yaml_fin.read(&yaml_input[0], yaml_file_size);
  }
  // Parse the yaml configuration.
  if (!load_config(yaml_input)) {
    std::cerr << "Parse file 'model.yml' failed!" << std::endl;
    exit(-1);
  }
  if (key == "") {
    config.SetModel(model_file, params_file);
  }
  if (use_mkl && !use_gpu) {
    if (name != "HRNet" && name != "DeepLabv3p" && name != "PPYOLO") {
      config.EnableMKLDNN();
      config.SetCpuMathLibraryNumThreads(mkl_thread_num);
    } else {
      std::cerr << "MKL-DNN is not supported for "
                << "HRNet/DeepLabv3p/PPYOLO" << std::endl;
    }
  }
  if (use_gpu) {
    config.EnableUseGpu(100, gpu_id);
  } else {
    config.DisableGpu();
  }
  config.SwitchUseFeedFetchOps(false);
  config.SwitchSpecifyInputNames(true);
  // Enable graph optimization (IR optimization is disabled on ARM).
#if defined(__arm__) || defined(__aarch64__)
  config.SwitchIrOptim(false);
#else
  config.SwitchIrOptim(use_ir_optim);
#endif
  // Enable memory optimization.
  config.EnableMemoryOptim();
  if (use_trt && use_gpu) {
    config.EnableTensorRtEngine(
        1 << 20 /* workspace_size */,
        32 /* max_batch_size */,
        20 /* min_subgraph_size */,
        paddle::AnalysisConfig::Precision::kFloat32 /* precision */,
        true /* use_static */,
        false /* use_calib_mode */);
  }
  predictor_ = CreatePaddlePredictor(config);
}

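// Parses model.yml: reads the model type and name, rejects pre-1.0.0 models,
// sets up the transform pipeline (RGB by default, BGR if TransformsMode says
// so), fills the label list, and reads input_channel (default 3).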
bool Model::load_config(const std::string& yaml_input) {
  YAML::Node config = YAML::Load(yaml_input);
  type = config["_Attributes"]["model_type"].as<std::string>();
  name = config["Model"].as<std::string>();
  std::string version = config["version"].as<std::string>();
  if (version[0] == '0') {
    std::cerr << "[Init] Version of the loaded model is lower than 1.0.0, "
              << "deployment cannot be done, please refer to "
              << "https://github.com/PaddlePaddle/PaddleX/blob/develop/docs"
              << "/tutorials/deploy/upgrade_version.md "
              << "to transfer version." << std::endl;
    return false;
  }
  bool to_rgb = true;
  if (config["TransformsMode"].IsDefined()) {
    std::string mode = config["TransformsMode"].as<std::string>();
    if (mode == "BGR") {
      to_rgb = false;
    } else if (mode != "RGB") {
      std::cerr << "[Init] Only 'RGB' or 'BGR' is supported for TransformsMode"
                << std::endl;
      return false;
    }
  }
  // Build the data preprocessing pipeline.
  transforms_.Init(config["Transforms"], to_rgb);
  // Read the label list.
  labels.clear();
  for (const auto& item : config["_Attributes"]["labels"]) {
    int index = labels.size();
    labels[index] = item.as<std::string>();
  }
  if (config["_init_params"]["input_channel"].IsDefined()) {
    input_channel_ = config["_init_params"]["input_channel"].as<int>();
  } else {
    input_channel_ = 3;
  }
  return true;
}

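// Runs the configured transform pipeline on one image and fills *blob with
// the normalized tensor data plus the size bookkeeping used by postprocess.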
bool Model::preprocess(const cv::Mat& input_im, ImageBlob* blob) {
  cv::Mat im = input_im.clone();
  if (!transforms_.Run(&im, blob)) {
    return false;
  }
  return true;
}

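// Batch preprocessing, parallelized over the images with OpenMP.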
bool Model::preprocess(const std::vector<cv::Mat>& input_im_batch,
                       std::vector<ImageBlob>* blob_batch,
                       int thread_num) {
  int batch_size = input_im_batch.size();
  bool success = true;
  thread_num = std::min(thread_num, batch_size);
  #pragma omp parallel for num_threads(thread_num)
  for (int i = 0; i < batch_size; ++i) {
    cv::Mat im = input_im_batch[i].clone();
    if (!transforms_.Run(&im, &(*blob_batch)[i])) {
      success = false;
    }
  }
  return success;
}

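// Classification inference on a single image: one forward pass, then an
// argmax over the output scores.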
bool Model::predict(const cv::Mat& im, ClsResult* result) {
  inputs_.clear();
  if (type == "detector") {
    std::cerr << "The loaded model is a 'detector', DetResult should be "
                 "passed to function predict()!" << std::endl;
    return false;
  } else if (type == "segmenter") {
    std::cerr << "The loaded model is a 'segmenter', SegResult should be "
                 "passed to function predict()!" << std::endl;
    return false;
  }
  // Preprocess the image.
  if (!preprocess(im, &inputs_)) {
    std::cerr << "Preprocess failed!" << std::endl;
    return false;
  }
  // Run inference.
  auto in_tensor = predictor_->GetInputTensor("image");
  int h = inputs_.new_im_size_[0];
  int w = inputs_.new_im_size_[1];
  in_tensor->Reshape({1, input_channel_, h, w});
  in_tensor->copy_from_cpu(inputs_.im_data_.data());
  predictor_->ZeroCopyRun();
  // Fetch the output tensor.
  auto output_names = predictor_->GetOutputNames();
  auto output_tensor = predictor_->GetOutputTensor(output_names[0]);
  std::vector<int> output_shape = output_tensor->shape();
  int size = 1;
  for (const auto& i : output_shape) {
    size *= i;
  }
  outputs_.resize(size);
  output_tensor->copy_to_cpu(outputs_.data());
  // Postprocess: argmax over the class scores.
  auto ptr = std::max_element(std::begin(outputs_), std::end(outputs_));
  result->category_id = std::distance(std::begin(outputs_), ptr);
  result->score = *ptr;
  result->category = labels[result->category_id];
  return true;
}

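// Batched classification inference. All images are packed into a single
// NCHW tensor, so every preprocessed image is assumed to share the size of
// the first one.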
bool Model::predict(const std::vector<cv::Mat>& im_batch,
                    std::vector<ClsResult>* results,
                    int thread_num) {
  for (auto& inputs : inputs_batch_) {
    inputs.clear();
  }
  if (type == "detector") {
    std::cerr << "The loaded model is a 'detector', DetResult should be "
                 "passed to function predict()!" << std::endl;
    return false;
  } else if (type == "segmenter") {
    std::cerr << "The loaded model is a 'segmenter', SegResult should be "
                 "passed to function predict()!" << std::endl;
    return false;
  }
  inputs_batch_.assign(im_batch.size(), ImageBlob());
  // Preprocess the batch.
  if (!preprocess(im_batch, &inputs_batch_, thread_num)) {
    std::cerr << "Preprocess failed!" << std::endl;
    return false;
  }
  // Pack the batch into one contiguous input tensor and run inference.
  int batch_size = im_batch.size();
  auto in_tensor = predictor_->GetInputTensor("image");
  int h = inputs_batch_[0].new_im_size_[0];
  int w = inputs_batch_[0].new_im_size_[1];
  in_tensor->Reshape({batch_size, input_channel_, h, w});
  std::vector<float> inputs_data(batch_size * input_channel_ * h * w);
  for (int i = 0; i < batch_size; ++i) {
    std::copy(inputs_batch_[i].im_data_.begin(),
              inputs_batch_[i].im_data_.end(),
              inputs_data.begin() + i * input_channel_ * h * w);
  }
  in_tensor->copy_from_cpu(inputs_data.data());
  predictor_->ZeroCopyRun();
  // Fetch the output tensor.
  auto output_names = predictor_->GetOutputNames();
  auto output_tensor = predictor_->GetOutputTensor(output_names[0]);
  std::vector<int> output_shape = output_tensor->shape();
  int size = 1;
  for (const auto& i : output_shape) {
    size *= i;
  }
  outputs_.resize(size);
  output_tensor->copy_to_cpu(outputs_.data());
  // Postprocess: per-image argmax over that image's slice of the scores.
  results->clear();
  results->resize(batch_size);
  int single_batch_size = size / batch_size;
  for (int i = 0; i < batch_size; ++i) {
    auto start_ptr = std::begin(outputs_);
    auto end_ptr = std::begin(outputs_);
    std::advance(start_ptr, i * single_batch_size);
    std::advance(end_ptr, (i + 1) * single_batch_size);
    auto ptr = std::max_element(start_ptr, end_ptr);
    (*results)[i].category_id = std::distance(start_ptr, ptr);
    (*results)[i].score = *ptr;
    (*results)[i].category = labels[(*results)[i].category_id];
  }
  return true;
}

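// Detection inference on a single image. YOLOv3/PPYOLO additionally take
// the original image size; FasterRCNN/MaskRCNN take im_info and im_shape.
// Each output box is decoded as [class_id, score, xmin, ymin, xmax, ymax];
// for MaskRCNN the per-box masks are decoded as well.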
bool Model::predict(const cv::Mat& im, DetResult* result) {
  inputs_.clear();
  result->clear();
  if (type == "classifier") {
    std::cerr << "The loaded model is a 'classifier', ClsResult should be "
                 "passed to function predict()!" << std::endl;
    return false;
  } else if (type == "segmenter") {
    std::cerr << "The loaded model is a 'segmenter', SegResult should be "
                 "passed to function predict()!" << std::endl;
    return false;
  }
  // Preprocess.
  if (!preprocess(im, &inputs_)) {
    std::cerr << "Preprocess failed!" << std::endl;
    return false;
  }
  int h = inputs_.new_im_size_[0];
  int w = inputs_.new_im_size_[1];
  auto im_tensor = predictor_->GetInputTensor("image");
  im_tensor->Reshape({1, input_channel_, h, w});
  im_tensor->copy_from_cpu(inputs_.im_data_.data());
  if (name == "YOLOv3" || name == "PPYOLO") {
    auto im_size_tensor = predictor_->GetInputTensor("im_size");
    im_size_tensor->Reshape({1, 2});
    im_size_tensor->copy_from_cpu(inputs_.ori_im_size_.data());
  } else if (name == "FasterRCNN" || name == "MaskRCNN") {
    auto im_info_tensor = predictor_->GetInputTensor("im_info");
    auto im_shape_tensor = predictor_->GetInputTensor("im_shape");
    im_info_tensor->Reshape({1, 3});
    im_shape_tensor->Reshape({1, 3});
    float ori_h = static_cast<float>(inputs_.ori_im_size_[0]);
    float ori_w = static_cast<float>(inputs_.ori_im_size_[1]);
    float new_h = static_cast<float>(inputs_.new_im_size_[0]);
    float new_w = static_cast<float>(inputs_.new_im_size_[1]);
    float im_info[] = {new_h, new_w, inputs_.scale};
    float im_shape[] = {ori_h, ori_w, 1.0};
    im_info_tensor->copy_from_cpu(im_info);
    im_shape_tensor->copy_from_cpu(im_shape);
  }
  // Run inference.
  predictor_->ZeroCopyRun();
  std::vector<float> output_box;
  auto output_names = predictor_->GetOutputNames();
  auto output_box_tensor = predictor_->GetOutputTensor(output_names[0]);
  std::vector<int> output_box_shape = output_box_tensor->shape();
  int size = 1;
  for (const auto& i : output_box_shape) {
    size *= i;
  }
  output_box.resize(size);
  output_box_tensor->copy_to_cpu(output_box.data());
  if (size < 6) {
    std::cerr << "[WARNING] There's no object detected." << std::endl;
    return true;
  }
  int num_boxes = size / 6;
  // Box postprocess: each box is [class_id, score, xmin, ymin, xmax, ymax].
  for (int i = 0; i < num_boxes; ++i) {
    Box box;
    box.category_id = static_cast<int>(round(output_box[i * 6]));
    box.category = labels[box.category_id];
    box.score = output_box[i * 6 + 1];
    float xmin = output_box[i * 6 + 2];
    float ymin = output_box[i * 6 + 3];
    float xmax = output_box[i * 6 + 4];
    float ymax = output_box[i * 6 + 5];
    float w = xmax - xmin + 1;
    float h = ymax - ymin + 1;
    box.coordinate = {xmin, ymin, w, h};
    result->boxes.push_back(std::move(box));
  }
  // Mask postprocess: resize each box's class mask to the box size, then
  // binarize at 0.5.
  if (name == "MaskRCNN") {
    std::vector<float> output_mask;
    auto output_mask_tensor = predictor_->GetOutputTensor(output_names[1]);
    std::vector<int> output_mask_shape = output_mask_tensor->shape();
    int masks_size = 1;
    for (const auto& i : output_mask_shape) {
      masks_size *= i;
    }
    int mask_pixels = output_mask_shape[2] * output_mask_shape[3];
    int classes = output_mask_shape[1];
    output_mask.resize(masks_size);
    output_mask_tensor->copy_to_cpu(output_mask.data());
    result->mask_resolution = output_mask_shape[2];
    for (int i = 0; i < result->boxes.size(); ++i) {
      Box* box = &result->boxes[i];
      box->mask.shape = {static_cast<int>(box->coordinate[2]),
                         static_cast<int>(box->coordinate[3])};
      auto begin_mask =
          output_mask.data() + (i * classes + box->category_id) * mask_pixels;
      cv::Mat bin_mask(result->mask_resolution,
                       result->mask_resolution,
                       CV_32FC1,
                       begin_mask);
      cv::resize(bin_mask,
                 bin_mask,
                 cv::Size(box->mask.shape[0], box->mask.shape[1]));
      cv::threshold(bin_mask, bin_mask, 0.5, 1, cv::THRESH_BINARY);
      auto mask_int_begin = reinterpret_cast<float*>(bin_mask.data);
      auto mask_int_end =
          mask_int_begin + box->mask.shape[0] * box->mask.shape[1];
      box->mask.data.assign(mask_int_begin, mask_int_end);
    }
  }
  return true;
}

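// Batched detection inference. For RCNN models the preprocessed images are
// first zero-padded to a common size; the output tensor's LoD maps each box
// back to its source image.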
bool Model::predict(const std::vector<cv::Mat>& im_batch,
                    std::vector<DetResult>* results,
                    int thread_num) {
  for (auto& inputs : inputs_batch_) {
    inputs.clear();
  }
  if (type == "classifier") {
    std::cerr << "The loaded model is a 'classifier', ClsResult should be "
                 "passed to function predict()!" << std::endl;
    return false;
  } else if (type == "segmenter") {
    std::cerr << "The loaded model is a 'segmenter', SegResult should be "
                 "passed to function predict()!" << std::endl;
    return false;
  }
  inputs_batch_.assign(im_batch.size(), ImageBlob());
  int batch_size = im_batch.size();
  // Preprocess.
  if (!preprocess(im_batch, &inputs_batch_, thread_num)) {
    std::cerr << "Preprocess failed!" << std::endl;
    return false;
  }
  // RCNN models expect one input size per batch, so pad every image to the
  // batch-wide maximum height/width with zeros.
  if (batch_size > 1) {
    if (name == "FasterRCNN" || name == "MaskRCNN") {
      int max_h = -1;
      int max_w = -1;
      for (int i = 0; i < batch_size; ++i) {
        max_h = std::max(max_h, inputs_batch_[i].new_im_size_[0]);
        max_w = std::max(max_w, inputs_batch_[i].new_im_size_[1]);
      }
      thread_num = std::min(thread_num, batch_size);
      #pragma omp parallel for num_threads(thread_num)
      for (int i = 0; i < batch_size; ++i) {
        int h = inputs_batch_[i].new_im_size_[0];
        int w = inputs_batch_[i].new_im_size_[1];
        int c = im_batch[i].channels();
        if (max_h != h || max_w != w) {
          // Copy row by row into a zero-initialized padded buffer.
          std::vector<float> temp_buffer(c * max_h * max_w);
          float* temp_ptr = temp_buffer.data();
          float* ptr = inputs_batch_[i].im_data_.data();
          for (int cur_channel = c - 1; cur_channel >= 0; --cur_channel) {
            int ori_pos = cur_channel * h * w + (h - 1) * w;
            int des_pos = cur_channel * max_h * max_w + (h - 1) * max_w;
            int last_pos = cur_channel * h * w;
            for (; ori_pos >= last_pos; ori_pos -= w, des_pos -= max_w) {
              memcpy(temp_ptr + des_pos, ptr + ori_pos, w * sizeof(float));
            }
          }
          inputs_batch_[i].im_data_.swap(temp_buffer);
          inputs_batch_[i].new_im_size_[0] = max_h;
          inputs_batch_[i].new_im_size_[1] = max_w;
        }
      }
    }
  }
  int h = inputs_batch_[0].new_im_size_[0];
  int w = inputs_batch_[0].new_im_size_[1];
  auto im_tensor = predictor_->GetInputTensor("image");
  im_tensor->Reshape({batch_size, input_channel_, h, w});
  std::vector<float> inputs_data(batch_size * input_channel_ * h * w);
  for (int i = 0; i < batch_size; ++i) {
    std::copy(inputs_batch_[i].im_data_.begin(),
              inputs_batch_[i].im_data_.end(),
              inputs_data.begin() + i * input_channel_ * h * w);
  }
  im_tensor->copy_from_cpu(inputs_data.data());
  if (name == "YOLOv3" || name == "PPYOLO") {
    auto im_size_tensor = predictor_->GetInputTensor("im_size");
    im_size_tensor->Reshape({batch_size, 2});
    std::vector<int> inputs_data_size(batch_size * 2);
    for (int i = 0; i < batch_size; ++i) {
      std::copy(inputs_batch_[i].ori_im_size_.begin(),
                inputs_batch_[i].ori_im_size_.end(),
                inputs_data_size.begin() + 2 * i);
    }
    im_size_tensor->copy_from_cpu(inputs_data_size.data());
  } else if (name == "FasterRCNN" || name == "MaskRCNN") {
    auto im_info_tensor = predictor_->GetInputTensor("im_info");
    auto im_shape_tensor = predictor_->GetInputTensor("im_shape");
    im_info_tensor->Reshape({batch_size, 3});
    im_shape_tensor->Reshape({batch_size, 3});
    std::vector<float> im_info(3 * batch_size);
    std::vector<float> im_shape(3 * batch_size);
    for (int i = 0; i < batch_size; ++i) {
      float ori_h = static_cast<float>(inputs_batch_[i].ori_im_size_[0]);
      float ori_w = static_cast<float>(inputs_batch_[i].ori_im_size_[1]);
      float new_h = static_cast<float>(inputs_batch_[i].new_im_size_[0]);
      float new_w = static_cast<float>(inputs_batch_[i].new_im_size_[1]);
      im_info[i * 3] = new_h;
      im_info[i * 3 + 1] = new_w;
      im_info[i * 3 + 2] = inputs_batch_[i].scale;
      im_shape[i * 3] = ori_h;
      im_shape[i * 3 + 1] = ori_w;
      im_shape[i * 3 + 2] = 1.0;
    }
    im_info_tensor->copy_from_cpu(im_info.data());
    im_shape_tensor->copy_from_cpu(im_shape.data());
  }
  // Run inference.
  predictor_->ZeroCopyRun();
  // Fetch all boxes.
  std::vector<float> output_box;
  auto output_names = predictor_->GetOutputNames();
  auto output_box_tensor = predictor_->GetOutputTensor(output_names[0]);
  std::vector<int> output_box_shape = output_box_tensor->shape();
  int size = 1;
  for (const auto& i : output_box_shape) {
    size *= i;
  }
  output_box.resize(size);
  output_box_tensor->copy_to_cpu(output_box.data());
  if (size < 6) {
    std::cerr << "[WARNING] There's no object detected." << std::endl;
    return true;
  }
  // The LoD maps the flat box list back to images in the batch.
  auto lod_vector = output_box_tensor->lod();
  int num_boxes = size / 6;
  // Box postprocess.
  results->clear();
  results->resize(batch_size);
  for (int i = 0; i < lod_vector[0].size() - 1; ++i) {
    for (int j = lod_vector[0][i]; j < lod_vector[0][i + 1]; ++j) {
      Box box;
      box.category_id = static_cast<int>(round(output_box[j * 6]));
      box.category = labels[box.category_id];
      box.score = output_box[j * 6 + 1];
      float xmin = output_box[j * 6 + 2];
      float ymin = output_box[j * 6 + 3];
      float xmax = output_box[j * 6 + 4];
      float ymax = output_box[j * 6 + 5];
      float w = xmax - xmin + 1;
      float h = ymax - ymin + 1;
      box.coordinate = {xmin, ymin, w, h};
      (*results)[i].boxes.push_back(std::move(box));
    }
  }
  // Mask postprocess.
  if (name == "MaskRCNN") {
    std::vector<float> output_mask;
    auto output_mask_tensor = predictor_->GetOutputTensor(output_names[1]);
    std::vector<int> output_mask_shape = output_mask_tensor->shape();
    int masks_size = 1;
    for (const auto& i : output_mask_shape) {
      masks_size *= i;
    }
    int mask_pixels = output_mask_shape[2] * output_mask_shape[3];
    int classes = output_mask_shape[1];
    output_mask.resize(masks_size);
    output_mask_tensor->copy_to_cpu(output_mask.data());
    // mask_idx indexes boxes across the whole batch, matching the flat
    // layout of the mask tensor.
    int mask_idx = 0;
    for (int i = 0; i < lod_vector[0].size() - 1; ++i) {
      (*results)[i].mask_resolution = output_mask_shape[2];
      for (int j = 0; j < (*results)[i].boxes.size(); ++j) {
        Box* box = &(*results)[i].boxes[j];
        box->mask.shape = {static_cast<int>(box->coordinate[2]),
                           static_cast<int>(box->coordinate[3])};
        auto begin_mask =
            output_mask.data() +
            (mask_idx * classes + box->category_id) * mask_pixels;
        cv::Mat bin_mask(output_mask_shape[2],
                         output_mask_shape[3],
                         CV_32FC1,
                         begin_mask);
        cv::resize(bin_mask,
                   bin_mask,
                   cv::Size(box->mask.shape[0], box->mask.shape[1]));
        cv::threshold(bin_mask, bin_mask, 0.5, 1, cv::THRESH_BINARY);
        auto mask_int_begin = reinterpret_cast<float*>(bin_mask.data);
        auto mask_int_end =
            mask_int_begin + box->mask.shape[0] * box->mask.shape[1];
        box->mask.data.assign(mask_int_begin, mask_int_end);
        mask_idx++;
      }
    }
  }
  return true;
}

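// Segmentation inference on a single image: fetches the label map and score
// map, then undoes the recorded padding/resize steps in reverse order to
// restore the original image size.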
bool Model::predict(const cv::Mat& im, SegResult* result) {
  result->clear();
  inputs_.clear();
  if (type == "classifier") {
    std::cerr << "The loaded model is a 'classifier', ClsResult should be "
                 "passed to function predict()!" << std::endl;
    return false;
  } else if (type == "detector") {
    std::cerr << "The loaded model is a 'detector', DetResult should be "
                 "passed to function predict()!" << std::endl;
    return false;
  }
  // Preprocess.
  if (!preprocess(im, &inputs_)) {
    std::cerr << "Preprocess failed!" << std::endl;
    return false;
  }
  int h = inputs_.new_im_size_[0];
  int w = inputs_.new_im_size_[1];
  auto im_tensor = predictor_->GetInputTensor("image");
  im_tensor->Reshape({1, input_channel_, h, w});
  im_tensor->copy_from_cpu(inputs_.im_data_.data());
  // Run inference.
  predictor_->ZeroCopyRun();
  // Fetch the label map.
  auto output_names = predictor_->GetOutputNames();
  auto output_label_tensor = predictor_->GetOutputTensor(output_names[0]);
  std::vector<int> output_label_shape = output_label_tensor->shape();
  int size = 1;
  for (const auto& i : output_label_shape) {
    size *= i;
    result->label_map.shape.push_back(i);
  }
  result->label_map.data.resize(size);
  output_label_tensor->copy_to_cpu(result->label_map.data.data());
  // Fetch the score map.
  auto output_score_tensor = predictor_->GetOutputTensor(output_names[1]);
  std::vector<int> output_score_shape = output_score_tensor->shape();
  size = 1;
  for (const auto& i : output_score_shape) {
    size *= i;
    result->score_map.shape.push_back(i);
  }
  result->score_map.data.resize(size);
  output_score_tensor->copy_to_cpu(result->score_map.data.data());
  // Map the result back to the original image size by undoing the recorded
  // padding/resize operations in reverse order.
  std::vector<uint8_t> label_map(result->label_map.data.begin(),
                                 result->label_map.data.end());
  cv::Mat mask_label(result->label_map.shape[1],
                     result->label_map.shape[2],
                     CV_8UC1,
                     label_map.data());
  cv::Mat mask_score(result->score_map.shape[2],
                     result->score_map.shape[3],
                     CV_32FC1,
                     result->score_map.data.data());
  int idx = 1;
  int len_postprocess = inputs_.im_size_before_resize_.size();
  for (std::vector<std::string>::reverse_iterator iter =
           inputs_.reshape_order_.rbegin();
       iter != inputs_.reshape_order_.rend();
       ++iter) {
    if (*iter == "padding") {
      auto before_shape = inputs_.im_size_before_resize_[len_postprocess - idx];
      inputs_.im_size_before_resize_.pop_back();
      auto padding_w = before_shape[0];
      auto padding_h = before_shape[1];
      mask_label = mask_label(cv::Rect(0, 0, padding_h, padding_w));
      mask_score = mask_score(cv::Rect(0, 0, padding_h, padding_w));
    } else if (*iter == "resize") {
      auto before_shape = inputs_.im_size_before_resize_[len_postprocess - idx];
      inputs_.im_size_before_resize_.pop_back();
      auto resize_w = before_shape[0];
      auto resize_h = before_shape[1];
      cv::resize(mask_label,
                 mask_label,
                 cv::Size(resize_h, resize_w),
                 0,
                 0,
                 cv::INTER_NEAREST);
      cv::resize(mask_score,
                 mask_score,
                 cv::Size(resize_h, resize_w),
                 0,
                 0,
                 cv::INTER_LINEAR);
    }
    ++idx;
  }
  result->label_map.data.assign(mask_label.begin<uint8_t>(),
                                mask_label.end<uint8_t>());
  result->label_map.shape = {mask_label.rows, mask_label.cols};
  result->score_map.data.assign(mask_score.begin<float>(),
                                mask_score.end<float>());
  result->score_map.shape = {mask_score.rows, mask_score.cols};
  return true;
}

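// Batched segmentation inference: same decoding as the single-image
// version, applied per image after slicing the batched output tensors.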
bool Model::predict(const std::vector<cv::Mat>& im_batch,
                    std::vector<SegResult>* results,
                    int thread_num) {
  for (auto& inputs : inputs_batch_) {
    inputs.clear();
  }
  if (type == "classifier") {
    std::cerr << "The loaded model is a 'classifier', ClsResult should be "
                 "passed to function predict()!" << std::endl;
    return false;
  } else if (type == "detector") {
    std::cerr << "The loaded model is a 'detector', DetResult should be "
                 "passed to function predict()!" << std::endl;
    return false;
  }
  // Preprocess.
  inputs_batch_.assign(im_batch.size(), ImageBlob());
  if (!preprocess(im_batch, &inputs_batch_, thread_num)) {
    std::cerr << "Preprocess failed!" << std::endl;
    return false;
  }
  int batch_size = im_batch.size();
  results->clear();
  results->resize(batch_size);
  int h = inputs_batch_[0].new_im_size_[0];
  int w = inputs_batch_[0].new_im_size_[1];
  auto im_tensor = predictor_->GetInputTensor("image");
  im_tensor->Reshape({batch_size, input_channel_, h, w});
  std::vector<float> inputs_data(batch_size * input_channel_ * h * w);
  for (int i = 0; i < batch_size; ++i) {
    std::copy(inputs_batch_[i].im_data_.begin(),
              inputs_batch_[i].im_data_.end(),
              inputs_data.begin() + i * input_channel_ * h * w);
  }
  im_tensor->copy_from_cpu(inputs_data.data());
  // Run inference.
  predictor_->ZeroCopyRun();
  // Fetch the label map and slice it per image.
  auto output_names = predictor_->GetOutputNames();
  auto output_label_tensor = predictor_->GetOutputTensor(output_names[0]);
  std::vector<int> output_label_shape = output_label_tensor->shape();
  int size = 1;
  for (const auto& i : output_label_shape) {
    size *= i;
  }
  std::vector<int64_t> output_labels(size, 0);
  output_label_tensor->copy_to_cpu(output_labels.data());
  auto output_labels_iter = output_labels.begin();
  int single_batch_size = size / batch_size;
  for (int i = 0; i < batch_size; ++i) {
    (*results)[i].label_map.data.resize(single_batch_size);
    (*results)[i].label_map.shape.push_back(1);
    for (int j = 1; j < output_label_shape.size(); ++j) {
      (*results)[i].label_map.shape.push_back(output_label_shape[j]);
    }
    std::copy(output_labels_iter + i * single_batch_size,
              output_labels_iter + (i + 1) * single_batch_size,
              (*results)[i].label_map.data.data());
  }
  // Fetch the score map and slice it per image.
  auto output_score_tensor = predictor_->GetOutputTensor(output_names[1]);
  std::vector<int> output_score_shape = output_score_tensor->shape();
  size = 1;
  for (const auto& i : output_score_shape) {
    size *= i;
  }
  std::vector<float> output_scores(size, 0);
  output_score_tensor->copy_to_cpu(output_scores.data());
  auto output_scores_iter = output_scores.begin();
  int single_batch_score_size = size / batch_size;
  for (int i = 0; i < batch_size; ++i) {
    (*results)[i].score_map.data.resize(single_batch_score_size);
    (*results)[i].score_map.shape.push_back(1);
    for (int j = 1; j < output_score_shape.size(); ++j) {
      (*results)[i].score_map.shape.push_back(output_score_shape[j]);
    }
    std::copy(output_scores_iter + i * single_batch_score_size,
              output_scores_iter + (i + 1) * single_batch_score_size,
              (*results)[i].score_map.data.data());
  }
  // Map each result back to its original image size by undoing the recorded
  // padding/resize operations in reverse order.
  for (int i = 0; i < batch_size; ++i) {
    std::vector<uint8_t> label_map((*results)[i].label_map.data.begin(),
                                   (*results)[i].label_map.data.end());
    cv::Mat mask_label((*results)[i].label_map.shape[1],
                       (*results)[i].label_map.shape[2],
                       CV_8UC1,
                       label_map.data());
    cv::Mat mask_score((*results)[i].score_map.shape[2],
                       (*results)[i].score_map.shape[3],
                       CV_32FC1,
                       (*results)[i].score_map.data.data());
    int idx = 1;
    int len_postprocess = inputs_batch_[i].im_size_before_resize_.size();
    for (std::vector<std::string>::reverse_iterator iter =
             inputs_batch_[i].reshape_order_.rbegin();
         iter != inputs_batch_[i].reshape_order_.rend();
         ++iter) {
      if (*iter == "padding") {
        auto before_shape =
            inputs_batch_[i].im_size_before_resize_[len_postprocess - idx];
        inputs_batch_[i].im_size_before_resize_.pop_back();
        auto padding_w = before_shape[0];
        auto padding_h = before_shape[1];
        mask_label = mask_label(cv::Rect(0, 0, padding_h, padding_w));
        mask_score = mask_score(cv::Rect(0, 0, padding_h, padding_w));
      } else if (*iter == "resize") {
        auto before_shape =
            inputs_batch_[i].im_size_before_resize_[len_postprocess - idx];
        inputs_batch_[i].im_size_before_resize_.pop_back();
        auto resize_w = before_shape[0];
        auto resize_h = before_shape[1];
        cv::resize(mask_label,
                   mask_label,
                   cv::Size(resize_h, resize_w),
                   0,
                   0,
                   cv::INTER_NEAREST);
        cv::resize(mask_score,
                   mask_score,
                   cv::Size(resize_h, resize_w),
                   0,
                   0,
                   cv::INTER_LINEAR);
      }
      ++idx;
    }
    (*results)[i].label_map.data.assign(mask_label.begin<uint8_t>(),
                                        mask_label.end<uint8_t>());
    (*results)[i].label_map.shape = {mask_label.rows, mask_label.cols};
    (*results)[i].score_map.data.assign(mask_score.begin<float>(),
                                        mask_score.end<float>());
    (*results)[i].score_map.shape = {mask_score.rows, mask_score.cols};
  }
  return true;
}
}  // namespace PaddleX