NCSDK OpenCV 4.0 support patch
The following patch adds OpenCV 4.0 support to NCSDK.
diff --git a/CMakeLists.txt b/CMakeLists.txt index da7142c..94ed82e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -44,7 +44,7 @@ include(cmake/Dependencies.cmake) # ---[ Flags if(UNIX OR APPLE) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -Wall") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -Wall -std=c++11") endif() caffe_set_caffe_link() diff --git a/src/caffe/layers/video_data_layer.cpp b/src/caffe/layers/video_data_layer.cpp index 80a055f..93aa0b6 100644 --- a/src/caffe/layers/video_data_layer.cpp +++ b/src/caffe/layers/video_data_layer.cpp @@ -52,12 +52,12 @@ void VideoDataLayer<Dtype>::DataLayerSetUp( if (!cap_.open(video_file)) { LOG(FATAL) << "Failed to open video: " << video_file; } - total_frames_ = cap_.get(CV_CAP_PROP_FRAME_COUNT); + total_frames_ = cap_.get(cv::CAP_PROP_FRAME_COUNT); processed_frames_ = 0; // Read image to infer shape. cap_ >> cv_img; // Set index back to the first frame. - cap_.set(CV_CAP_PROP_POS_FRAMES, 0); + cap_.set(cv::CAP_PROP_POS_FRAMES, 0); } else { LOG(FATAL) << "Unknow video type!"; } diff --git a/src/caffe/util/bbox_util.cpp b/src/caffe/util/bbox_util.cpp index 3d6ab9a..81ba6e0 100644 --- a/src/caffe/util/bbox_util.cpp +++ b/src/caffe/util/bbox_util.cpp @@ -2183,7 +2183,7 @@ void VisualizeBBox(const vector<cv::Mat>& images, const Blob<Dtype>* detections, &baseline); cv::rectangle(image, cv::Point(0, 0), cv::Point(text.width, text.height + baseline), - CV_RGB(255, 255, 255), CV_FILLED); + CV_RGB(255, 255, 255), cv::FILLED); cv::putText(image, buffer, cv::Point(0, text.height + baseline / 2.), fontface, scale, CV_RGB(0, 0, 0), thickness, 8); // Draw bboxes. 
@@ -2209,7 +2209,7 @@ void VisualizeBBox(const vector<cv::Mat>& images, const Blob<Dtype>* detections, cv::rectangle( image, bottom_left_pt + cv::Point(0, 0), bottom_left_pt + cv::Point(text.width, -text.height-baseline), - color, CV_FILLED); + color, cv::FILLED); cv::putText(image, buffer, bottom_left_pt - cv::Point(0, baseline), fontface, scale, CV_RGB(0, 0, 0), thickness, 8); } @@ -2218,7 +2218,7 @@ void VisualizeBBox(const vector<cv::Mat>& images, const Blob<Dtype>* detections, if (!save_file.empty()) { if (!cap_out.isOpened()) { cv::Size size(image.size().width, image.size().height); - cv::VideoWriter outputVideo(save_file, CV_FOURCC('D', 'I', 'V', 'X'), + cv::VideoWriter outputVideo(save_file, cv::VideoWriter::fourcc('D', 'I', 'V', 'X'), 30, size, true); cap_out = outputVideo; } diff --git a/src/caffe/util/im_transforms.cpp b/src/caffe/util/im_transforms.cpp index 5d282f3..fe9f9b7 100644 --- a/src/caffe/util/im_transforms.cpp +++ b/src/caffe/util/im_transforms.cpp @@ -243,12 +243,12 @@ void CenterObjectAndFillBg(const cv::Mat& in_img, const bool fill_bg, cv::Mat mask, crop_mask; if (in_img.channels() > 1) { cv::Mat in_img_gray; - cv::cvtColor(in_img, in_img_gray, CV_BGR2GRAY); + cv::cvtColor(in_img, in_img_gray, cv::COLOR_BGR2GRAY); cv::threshold(in_img_gray, mask, 0, 255, - CV_THRESH_BINARY_INV | CV_THRESH_OTSU); + cv::THRESH_BINARY_INV | cv::THRESH_OTSU); } else { cv::threshold(in_img, mask, 0, 255, - CV_THRESH_BINARY_INV | CV_THRESH_OTSU); + cv::THRESH_BINARY_INV | cv::THRESH_OTSU); } cv::Rect crop_rect = CropMask(mask, mask.at<uchar>(0, 0), 2); @@ -426,8 +426,8 @@ cv::Mat ApplyNoise(const cv::Mat& in_img, const NoiseParameter& param) { if (param.decolorize()) { cv::Mat grayscale_img; - cv::cvtColor(in_img, grayscale_img, CV_BGR2GRAY); - cv::cvtColor(grayscale_img, out_img, CV_GRAY2BGR); + cv::cvtColor(in_img, grayscale_img, cv::COLOR_BGR2GRAY); + cv::cvtColor(grayscale_img, out_img, cv::COLOR_GRAY2BGR); } else { out_img = in_img; } @@ -439,7 +439,7 @@ cv::Mat ApplyNoise(const cv::Mat& in_img, 
const NoiseParameter& param) { if (param.hist_eq()) { if (out_img.channels() > 1) { cv::Mat ycrcb_image; - cv::cvtColor(out_img, ycrcb_image, CV_BGR2YCrCb); + cv::cvtColor(out_img, ycrcb_image, cv::COLOR_BGR2YCrCb); // Extract the L channel vector<cv::Mat> ycrcb_planes(3); cv::split(ycrcb_image, ycrcb_planes); @@ -449,7 +449,7 @@ cv::Mat ApplyNoise(const cv::Mat& in_img, const NoiseParameter& param) { ycrcb_planes[0] = dst; cv::merge(ycrcb_planes, ycrcb_image); // convert back to RGB - cv::cvtColor(ycrcb_image, out_img, CV_YCrCb2BGR); + cv::cvtColor(ycrcb_image, out_img, cv::COLOR_YCrCb2BGR); } else { cv::Mat temp_img; cv::equalizeHist(out_img, temp_img); @@ -462,7 +462,7 @@ cv::Mat ApplyNoise(const cv::Mat& in_img, const NoiseParameter& param) { clahe->setClipLimit(4); if (out_img.channels() > 1) { cv::Mat ycrcb_image; - cv::cvtColor(out_img, ycrcb_image, CV_BGR2YCrCb); + cv::cvtColor(out_img, ycrcb_image, cv::COLOR_BGR2YCrCb); // Extract the L channel vector<cv::Mat> ycrcb_planes(3); cv::split(ycrcb_image, ycrcb_planes); @@ -472,7 +472,7 @@ cv::Mat ApplyNoise(const cv::Mat& in_img, const NoiseParameter& param) { ycrcb_planes[0] = dst; cv::merge(ycrcb_planes, ycrcb_image); // convert back to RGB - cv::cvtColor(ycrcb_image, out_img, CV_YCrCb2BGR); + cv::cvtColor(ycrcb_image, out_img, cv::COLOR_YCrCb2BGR); } else { cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(); clahe->setClipLimit(4); @@ -485,10 +485,10 @@ cv::Mat ApplyNoise(const cv::Mat& in_img, const NoiseParameter& param) { if (param.jpeg() > 0) { vector<uchar> buf; vector<int> params; - params.push_back(CV_IMWRITE_JPEG_QUALITY); + params.push_back(cv::IMWRITE_JPEG_QUALITY); params.push_back(param.jpeg()); cv::imencode(".jpg", out_img, buf, params); - out_img = cv::imdecode(buf, CV_LOAD_IMAGE_COLOR); + out_img = cv::imdecode(buf, cv::IMREAD_COLOR); } if (param.erode()) { @@ -536,14 +536,14 @@ cv::Mat ApplyNoise(const cv::Mat& in_img, const NoiseParameter& param) { if (param.convert_to_hsv()) { cv::Mat hsv_image; 
- cv::cvtColor(out_img, hsv_image, CV_BGR2HSV); + cv::cvtColor(out_img, hsv_image, cv::COLOR_BGR2HSV); out_img = hsv_image; } if (param.convert_to_lab()) { cv::Mat lab_image; out_img.convertTo(lab_image, CV_32F); lab_image *= 1.0 / 255; - cv::cvtColor(lab_image, out_img, CV_BGR2Lab); + cv::cvtColor(lab_image, out_img, cv::COLOR_BGR2Lab); } return out_img; } @@ -614,7 +614,7 @@ void AdjustSaturation(const cv::Mat& in_img, const float delta, cv::Mat* out_img) { if (fabs(delta - 1.f) != 1e-3) { // Convert to HSV colorspae. - cv::cvtColor(in_img, *out_img, CV_BGR2HSV); + cv::cvtColor(in_img, *out_img, cv::COLOR_BGR2HSV); // Split the image to 3 channels. vector<cv::Mat> channels; @@ -625,7 +625,7 @@ void AdjustSaturation(const cv::Mat& in_img, const float delta, cv::merge(channels, *out_img); // Back to BGR colorspace. - cvtColor(*out_img, *out_img, CV_HSV2BGR); + cvtColor(*out_img, *out_img, cv::COLOR_HSV2BGR); } else { *out_img = in_img; } @@ -648,7 +648,7 @@ void RandomHue(const cv::Mat& in_img, cv::Mat* out_img, void AdjustHue(const cv::Mat& in_img, const float delta, cv::Mat* out_img) { if (fabs(delta) > 0) { // Convert to HSV colorspae. - cv::cvtColor(in_img, *out_img, CV_BGR2HSV); + cv::cvtColor(in_img, *out_img, cv::COLOR_BGR2HSV); // Split the image to 3 channels. vector<cv::Mat> channels; @@ -659,7 +659,7 @@ void AdjustHue(const cv::Mat& in_img, const float delta, cv::Mat* out_img) { cv::merge(channels, *out_img); // Back to BGR colorspace. - cvtColor(*out_img, *out_img, CV_HSV2BGR); + cvtColor(*out_img, *out_img, cv::COLOR_HSV2BGR); } else { *out_img = in_img; }