From 8da8ac91e283eace7660aaf8887de9d60026d099 Mon Sep 17 00:00:00 2001 From: Bernhard Tittelbach Date: Tue, 26 Mar 2019 10:20:11 +0100 Subject: [PATCH] rename project --- cpp/openCVnaiveObstacleAvoidance/CMakeLists.txt | 42 ++ .../openCVnaiveObstacleAvoidance.cpp | 454 ++++++++++++++++++++ cpp/sampleOpenCV/CMakeLists.txt | 42 -- cpp/sampleOpenCV/sampleOpenCV.cpp | 454 -------------------- 4 files changed, 496 insertions(+), 496 deletions(-) create mode 100644 cpp/openCVnaiveObstacleAvoidance/CMakeLists.txt create mode 100644 cpp/openCVnaiveObstacleAvoidance/openCVnaiveObstacleAvoidance.cpp delete mode 100644 cpp/sampleOpenCV/CMakeLists.txt delete mode 100644 cpp/sampleOpenCV/sampleOpenCV.cpp diff --git a/cpp/openCVnaiveObstacleAvoidance/CMakeLists.txt b/cpp/openCVnaiveObstacleAvoidance/CMakeLists.txt new file mode 100644 index 0000000..6b15659 --- /dev/null +++ b/cpp/openCVnaiveObstacleAvoidance/CMakeLists.txt @@ -0,0 +1,42 @@ +cmake_minimum_required(VERSION 2.8) + +# Please insert your OpenCV path +set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "/usr") + +# This needs at least OpenCV 2.x, but it should also work with OpenCV 3.x +find_package(OpenCV) + +if(NOT OpenCV_FOUND) + message("OpenCV example will not be build as no OpenCV was found!") + return() +endif() + +set_target_properties(${OpenCV_LIBS} PROPERTIES MAP_IMPORTED_CONFIG_RELWITHDEBINFO RELEASE) +set_target_properties(${OpenCV_LIBS} PROPERTIES MAP_IMPORTED_CONFIG_MINSIZEREL RELEASE) + +#set(CMAKE_PREFIX_PATH "../../../share") +set(CMAKE_PREFIX_PATH "../../../libroyale-3.21.1.70-LINUX-x86-64Bit/share") + +find_package(royale REQUIRED) + +link_directories(${royale_LIB_DIR}) + +project (openCVnaiveObstacleAvoidance) + +include_directories( + ${CMAKE_CURRENT_SOURCE_DIR}/../../inc +) + +add_executable(openCVnaiveObstacleAvoidance + openCVnaiveObstacleAvoidance.cpp + ) + +if(WIN32) + if(OpenCV_STATIC) + target_compile_options(openCVnaiveObstacleAvoidance PRIVATE "/MT$<$:d>") + endif() +endif() + +COPY_ROYALE_LIBS(openCVnaiveObstacleAvoidance) + +target_link_libraries(openCVnaiveObstacleAvoidance "${royale_LIBS}" "${OpenCV_LIBS}") diff --git a/cpp/openCVnaiveObstacleAvoidance/openCVnaiveObstacleAvoidance.cpp b/cpp/openCVnaiveObstacleAvoidance/openCVnaiveObstacleAvoidance.cpp new file mode 100644 index 0000000..6170396 --- /dev/null +++ b/cpp/openCVnaiveObstacleAvoidance/openCVnaiveObstacleAvoidance.cpp @@ -0,0 +1,454 @@ +/****************************************************************************\ + * Copyright (C) 2017 Infineon Technologies & pmdtechnologies ag + * + * THIS CODE AND INFORMATION ARE PROVIDED "AS IS" WITHOUT WARRANTY OF ANY + * KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A + * PARTICULAR PURPOSE. + * + \****************************************************************************/ + +#include +#include +#include +#include + +#include + +using namespace royale; +using namespace sample_utils; +using namespace std; +using namespace cv; + +//#define SHOW_GUI_WINDOWS + +// Linker errors for the OpenCV sample +// +// If this example gives linker errors about undefined references to cv::namedWindow and cv::imshow, +// or QFontEngine::glyphCache and qMessageFormatString (from OpenCV to Qt), it may be caused by a +// change in the compiler's C++ ABI. 
+// +// With Ubuntu and Debian's distribution packages, the libopencv packages that have 'v5' at the end +// of their name, for example libopencv-video2.4v5, are compatible with GCC 5 (and GCC 6), but +// incompatible with GCC 4.8 and GCC 4.9. The -dev packages don't have the postfix, but depend on +// the v5 (or non-v5) version of the corresponding lib package. When Ubuntu moves to OpenCV 3.0, +// they're likely to drop the postfix (but the packages will be for GCC 5 or later). +// +// If you are manually installing OpenCV or Qt, you need to ensure that the binaries were compiled +// with the same version of the compiler. The version number of the packages themselves doesn't say +// which ABI they use, it depends on which version of the compiler was used. + +class MyListener : public IDepthDataListener +{ + + static uint32_t const num_dist_columns_=4; + double latest_min_distance_[num_dist_columns_]; + double latest_min_distance_diff_[num_dist_columns_]; + + const int DELAY_BLUR = 100; + // const int MAX_KERNEL_LENGTH = 31; + const int MAX_KERNEL_LENGTH = 23; + + bool normblurImage = true; + + const uint8_t confidence_threshold_ = 0; + const double distance_threshold_ = 80; + +public : + + + MyListener() : + undistortImage (false) + { + } + + void onNewData (const DepthData *data) + { + // this callback function will be called for every new + // depth frame + + std::lock_guard lock (flagMutex); + + // create two images which will be filled afterwards + // each image containing one 32Bit channel + zImage.create (Size (data->width, data->height), CV_32FC1); +#ifdef SHOW_GUI_WINDOWS + grayImage.create (Size (data->width, data->height), CV_32FC1); + confidenceImage.create (Size (data->width, data->height), CV_8UC1); +#endif + + // set the image to zero + zImage = Scalar::all (0); +#ifdef SHOW_GUI_WINDOWS + grayImage = Scalar::all (0); +#endif + + int k = 0; + for (int y = 0; y < zImage.rows; y++) + { + float *zRowPtr = zImage.ptr (y); +#ifdef SHOW_GUI_WINDOWS + float *grayRowPtr = grayImage.ptr (y); + uint8_t *confRowPtr = confidenceImage.ptr (y); +#endif + for (int x = 0; x < zImage.cols; x++, k++) + { + auto curPoint = data->points.at (k); +#ifdef SHOW_GUI_WINDOWS + confRowPtr[x] = curPoint.depthConfidence; +#endif + if (curPoint.depthConfidence > confidence_threshold_) + { + // if the point is valid, map the pixel from 3D world + // coordinates to a 2D plane (this will distort the image) + zRowPtr[x] = adjustZValue (curPoint.z); +#ifdef SHOW_GUI_WINDOWS + grayRowPtr[x] = adjustGrayValue (curPoint.grayValue); +#endif + } else { + //asume point is as far away as possible and thus "SAFE" for obstacle avoidance + zRowPtr[x] = 255; +#ifdef SHOW_GUI_WINDOWS + grayRowPtr[x] = 255; +#endif + } + } + } + + // create images to store the 8Bit version (some OpenCV + // functions may only work on 8Bit images) + zImage8.create (Size (data->width, data->height), CV_8UC1); +#ifdef SHOW_GUI_WINDOWS + grayImage8.create (Size (data->width, data->height), CV_8UC1); +#endif + // convert images to the 8Bit version + // This sample uses a fixed scaling of the values to (0, 255) to avoid flickering. 
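+        // (adjustZValue clamps z at 2.5 m and maps it linearly to 0..255; adjustGrayValue does the same with a cap of 180, so the scaling stays stable from frame to frame.)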
+ // You can also replace this with an automatic scaling by using + // normalize(zImage, zImage8, 0, 255, NORM_MINMAX, CV_8UC1) + // normalize(grayImage, grayImage8, 0, 255, NORM_MINMAX, CV_8UC1) + zImage.convertTo (zImage8, CV_8UC1); +#ifdef SHOW_GUI_WINDOWS + grayImage.convertTo (grayImage8, CV_8UC1); +#endif + + if (undistortImage) + { + // call the undistortion function on the z image + Mat temp = zImage8.clone(); + undistort (temp, zImage8, cameraMatrix, distortionCoefficients); + } + + if (normblurImage) + { + auto temp = zImage8.clone(); + for ( int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2 ) + { + blur( temp, zImage8, Size( i, i ), Point(-1,-1) ); + } + } + + //// Debug: show column part of image + // Mat subimg = zImage8(Rect((zImage.cols/num_dist_columns_)*3,0, zImage.cols/num_dist_columns_ , zImage8.rows)); + // imshow ("column", subimg); + + //detect column distance + auto col_width = zImage.cols/num_dist_columns_; + for (uint32_t col=0; colwidth * 4, data->height * 4), CV_8UC1); + // resize (zImage8, scaledZImage, scaledZImage.size()); + // imshow ("Depth", scaledZImage); + +#ifdef SHOW_GUI_WINDOWS + imshow ("Depth", zImage8); + + // scale and display the gray image + // scaledGrayImage.create (Size (data->width * 4, data->height * 4), CV_8UC1); + // resize (grayImage8, scaledGrayImage, scaledGrayImage.size()); + // imshow ("Gray", scaledGrayImage); + + // if (undistortImage) + // { + // // call the undistortion function on the gray image + // Mat temp = grayImage8.clone(); + // undistort (temp, grayImage8, cameraMatrix, distortionCoefficients); + // } + + imshow ("Confidence", confidenceImage); +#endif + } + + void setLensParameters (const LensParameters &lensParameters) + { + // Construct the camera matrix + // (fx 0 cx) + // (0 fy cy) + // (0 0 1 ) + cameraMatrix = (Mat1d (3, 3) << lensParameters.focalLength.first, 0, lensParameters.principalPoint.first, + 0, lensParameters.focalLength.second, lensParameters.principalPoint.second, + 0, 0, 1); + + // Construct the distortion coefficients + // k1 k2 p1 p2 k3 + distortionCoefficients = (Mat1d (1, 5) << lensParameters.distortionRadial[0], + lensParameters.distortionRadial[1], + lensParameters.distortionTangential.first, + lensParameters.distortionTangential.second, + lensParameters.distortionRadial[2]); + } + + void toggleUndistort() + { + std::lock_guard lock (flagMutex); + undistortImage = !undistortImage; + } + + void toggleNormBlur() + { + std::lock_guard lock (flagMutex); + normblurImage = !normblurImage; + } + +private: + + // adjust z value to fit fixed scaling, here max dist is 2.5m + // the max dist here is used as an example and can be modified + float adjustZValue (float zValue) + { + float clampedDist = std::min (2.5f, zValue); + float newZValue = clampedDist / 2.5f * 255.0f; + return newZValue; + } + + // adjust gray value to fit fixed scaling, here max value is 180 + // the max value here is used as an example and can be modified + float adjustGrayValue (uint16_t grayValue) + { + float clampedVal = std::min (180.0f, grayValue * 1.0f); + float newGrayValue = clampedVal / 180.f * 255.0f; + return newGrayValue; + } + + bool naiveIsObstaclePresent(bool free_paths[num_dist_columns_]) + { + bool rv=false; + for (uint32_t col=0; col distance_threshold_; + if (!free_paths[col]) + { + rv=true; + } + } + return rv; + } + + void naiveObstacleAvoidanceDemo() + { + bool free_paths[num_dist_columns_]; + if (naiveIsObstaclePresent(free_paths)) + { + assert(num_dist_columns_ == 4); + if (free_paths[1] && free_paths[2]) + { + 
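+                // free_paths[] runs left to right across the image; 1 and 2 are the two centre columns, so only go straight when the middle of the view is clear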
std::cout << "GOSTRAIGHT" << std::endl; + } else if (free_paths[0]) + { + std::cout << "GOLEFT" << std::endl; + } else if (free_paths[3]) + { + std::cout << "GORIGHT" << std::endl; + } else { + std::cout << "STOP" << std::endl; + } + } + } + + // define images for depth and gray + // and for their 8Bit and scaled versions + Mat zImage, zImage8, scaledZImage; + Mat grayImage, grayImage8, scaledGrayImage; + Mat confidenceImage; + + // lens matrices used for the undistortion of + // the image + Mat cameraMatrix; + Mat distortionCoefficients; + + std::mutex flagMutex; + bool undistortImage; +}; + +int main (int argc, char *argv[]) +{ + // Windows requires that the application allocate these, not the DLL. + PlatformResources resources; + + // This is the data listener which will receive callbacks. It's declared + // before the cameraDevice so that, if this function exits with a 'return' + // statement while the camera is still capturing, it will still be in scope + // until the cameraDevice's destructor implicitly de-registers the listener. + MyListener listener; + + // this represents the main camera device object + std::unique_ptr cameraDevice; + + // the camera manager will query for a connected camera + { + CameraManager manager; + + // check the number of arguments + if (argc > 1) + { + // if the program was called with an argument try to open this as a file + cout << "Trying to open : " << argv[1] << endl; + cameraDevice = manager.createCamera (argv[1]); + } + else + { + // if no argument was given try to open the first connected camera + royale::Vector camlist (manager.getConnectedCameraList()); + cout << "Detected " << camlist.size() << " camera(s)." << endl; + + if (!camlist.empty()) + { + cameraDevice = manager.createCamera (camlist[0]); + } + else + { + cerr << "No suitable camera device detected." 
<< endl + << "Please make sure that a supported camera is plugged in, all drivers are " + << "installed, and you have proper USB permission" << endl; + return 1; + } + + camlist.clear(); + } + } + // the camera device is now available and CameraManager can be deallocated here + + if (cameraDevice == nullptr) + { + // no cameraDevice available + if (argc > 1) + { + cerr << "Could not open " << argv[1] << endl; + return 1; + } + else + { + cerr << "Cannot create the camera device" << endl; + return 1; + } + } + + // IMPORTANT: call the initialize method before working with the camera device + auto status = cameraDevice->initialize(); + if (status != CameraStatus::SUCCESS) + { + cerr << "Cannot initialize the camera device, error string : " << getErrorString (status) << endl; + return 1; + } + + // retrieve the lens parameters from Royale + LensParameters lensParameters; + status = cameraDevice->getLensParameters (lensParameters); + if (status != CameraStatus::SUCCESS) + { + cerr << "Can't read out the lens parameters" << endl; + return 1; + } + + listener.setLensParameters (lensParameters); + + // register a data listener + if (cameraDevice->registerDataListener (&listener) != CameraStatus::SUCCESS) + { + cerr << "Error registering data listener" << endl; + return 1; + } + +#ifdef SHOW_GUI_WINDOWS + // create two windows + namedWindow ("Depth", WINDOW_AUTOSIZE); + // namedWindow ("Gray", WINDOW_AUTOSIZE); + namedWindow ("Confidence", WINDOW_AUTOSIZE); + // namedWindow ("column", WINDOW_AUTOSIZE); +#endif + + + // set an operation mode + if (cameraDevice->setUseCase ("MODE_5_45FPS_500") != royale::CameraStatus::SUCCESS) + { + cerr << "Error setting use case" << endl; + return 1; + } + + // start capture mode + if (cameraDevice->startCapture() != CameraStatus::SUCCESS) + { + cerr << "Error starting the capturing" << endl; + return 1; + } + + + // Change the exposure time for the first stream of the use case (Royale will limit this to an + // eye-safe exposure time, with limits defined by the use case). The time is given in + // microseconds. + // + // Non-mixed mode use cases have exactly one stream, mixed mode use cases have more than one. + // For this example we only change the first stream. + // if (cameraDevice->setExposureTime (200, streamIds[0]) != royale::CameraStatus::SUCCESS) + // { + // cerr << "Cannot set exposure time for stream" << streamIds[0] << endl; + // } + // else + // { + // cout << "Changed exposure time for stream " << streamIds[0] << " to 200 microseconds ..." 
<< endl; + // } + + + int currentKey = 0; + + while (currentKey != 27) + { + // wait until a key is pressed + currentKey = waitKey (0) & 255; + + if (currentKey == 'd') + { + // toggle the undistortion of the image + listener.toggleUndistort(); + } + + if (currentKey == 'b') + { + // toggle the undistortion of the image + listener.toggleNormBlur(); + } + } + + // stop capture mode + if (cameraDevice->stopCapture() != CameraStatus::SUCCESS) + { + cerr << "Error stopping the capturing" << endl; + return 1; + } + + return 0; +} diff --git a/cpp/sampleOpenCV/CMakeLists.txt b/cpp/sampleOpenCV/CMakeLists.txt deleted file mode 100644 index d9b9d74..0000000 --- a/cpp/sampleOpenCV/CMakeLists.txt +++ /dev/null @@ -1,42 +0,0 @@ -cmake_minimum_required(VERSION 2.8) - -# Please insert your OpenCV path -set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "/usr") - -# This needs at least OpenCV 2.x, but it should also work with OpenCV 3.x -find_package(OpenCV) - -if(NOT OpenCV_FOUND) - message("OpenCV example will not be build as no OpenCV was found!") - return() -endif() - -set_target_properties(${OpenCV_LIBS} PROPERTIES MAP_IMPORTED_CONFIG_RELWITHDEBINFO RELEASE) -set_target_properties(${OpenCV_LIBS} PROPERTIES MAP_IMPORTED_CONFIG_MINSIZEREL RELEASE) - -#set(CMAKE_PREFIX_PATH "../../../share") -set(CMAKE_PREFIX_PATH "../../../libroyale-3.21.1.70-LINUX-x86-64Bit/share") - -find_package(royale REQUIRED) - -link_directories(${royale_LIB_DIR}) - -project (sampleOpenCV) - -include_directories( - ${CMAKE_CURRENT_SOURCE_DIR}/../../inc -) - -add_executable(sampleOpenCV - sampleOpenCV.cpp - ) - -if(WIN32) - if(OpenCV_STATIC) - target_compile_options(sampleOpenCV PRIVATE "/MT$<$:d>") - endif() -endif() - -COPY_ROYALE_LIBS(sampleOpenCV) - -target_link_libraries(sampleOpenCV "${royale_LIBS}" "${OpenCV_LIBS}") diff --git a/cpp/sampleOpenCV/sampleOpenCV.cpp b/cpp/sampleOpenCV/sampleOpenCV.cpp deleted file mode 100644 index 6170396..0000000 --- a/cpp/sampleOpenCV/sampleOpenCV.cpp +++ /dev/null @@ -1,454 +0,0 @@ -/****************************************************************************\ - * Copyright (C) 2017 Infineon Technologies & pmdtechnologies ag - * - * THIS CODE AND INFORMATION ARE PROVIDED "AS IS" WITHOUT WARRANTY OF ANY - * KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A - * PARTICULAR PURPOSE. - * - \****************************************************************************/ - -#include -#include -#include -#include - -#include - -using namespace royale; -using namespace sample_utils; -using namespace std; -using namespace cv; - -//#define SHOW_GUI_WINDOWS - -// Linker errors for the OpenCV sample -// -// If this example gives linker errors about undefined references to cv::namedWindow and cv::imshow, -// or QFontEngine::glyphCache and qMessageFormatString (from OpenCV to Qt), it may be caused by a -// change in the compiler's C++ ABI. -// -// With Ubuntu and Debian's distribution packages, the libopencv packages that have 'v5' at the end -// of their name, for example libopencv-video2.4v5, are compatible with GCC 5 (and GCC 6), but -// incompatible with GCC 4.8 and GCC 4.9. The -dev packages don't have the postfix, but depend on -// the v5 (or non-v5) version of the corresponding lib package. When Ubuntu moves to OpenCV 3.0, -// they're likely to drop the postfix (but the packages will be for GCC 5 or later). 
-// -// If you are manually installing OpenCV or Qt, you need to ensure that the binaries were compiled -// with the same version of the compiler. The version number of the packages themselves doesn't say -// which ABI they use, it depends on which version of the compiler was used. - -class MyListener : public IDepthDataListener -{ - - static uint32_t const num_dist_columns_=4; - double latest_min_distance_[num_dist_columns_]; - double latest_min_distance_diff_[num_dist_columns_]; - - const int DELAY_BLUR = 100; - // const int MAX_KERNEL_LENGTH = 31; - const int MAX_KERNEL_LENGTH = 23; - - bool normblurImage = true; - - const uint8_t confidence_threshold_ = 0; - const double distance_threshold_ = 80; - -public : - - - MyListener() : - undistortImage (false) - { - } - - void onNewData (const DepthData *data) - { - // this callback function will be called for every new - // depth frame - - std::lock_guard lock (flagMutex); - - // create two images which will be filled afterwards - // each image containing one 32Bit channel - zImage.create (Size (data->width, data->height), CV_32FC1); -#ifdef SHOW_GUI_WINDOWS - grayImage.create (Size (data->width, data->height), CV_32FC1); - confidenceImage.create (Size (data->width, data->height), CV_8UC1); -#endif - - // set the image to zero - zImage = Scalar::all (0); -#ifdef SHOW_GUI_WINDOWS - grayImage = Scalar::all (0); -#endif - - int k = 0; - for (int y = 0; y < zImage.rows; y++) - { - float *zRowPtr = zImage.ptr (y); -#ifdef SHOW_GUI_WINDOWS - float *grayRowPtr = grayImage.ptr (y); - uint8_t *confRowPtr = confidenceImage.ptr (y); -#endif - for (int x = 0; x < zImage.cols; x++, k++) - { - auto curPoint = data->points.at (k); -#ifdef SHOW_GUI_WINDOWS - confRowPtr[x] = curPoint.depthConfidence; -#endif - if (curPoint.depthConfidence > confidence_threshold_) - { - // if the point is valid, map the pixel from 3D world - // coordinates to a 2D plane (this will distort the image) - zRowPtr[x] = adjustZValue (curPoint.z); -#ifdef SHOW_GUI_WINDOWS - grayRowPtr[x] = adjustGrayValue (curPoint.grayValue); -#endif - } else { - //asume point is as far away as possible and thus "SAFE" for obstacle avoidance - zRowPtr[x] = 255; -#ifdef SHOW_GUI_WINDOWS - grayRowPtr[x] = 255; -#endif - } - } - } - - // create images to store the 8Bit version (some OpenCV - // functions may only work on 8Bit images) - zImage8.create (Size (data->width, data->height), CV_8UC1); -#ifdef SHOW_GUI_WINDOWS - grayImage8.create (Size (data->width, data->height), CV_8UC1); -#endif - // convert images to the 8Bit version - // This sample uses a fixed scaling of the values to (0, 255) to avoid flickering. 
- // You can also replace this with an automatic scaling by using - // normalize(zImage, zImage8, 0, 255, NORM_MINMAX, CV_8UC1) - // normalize(grayImage, grayImage8, 0, 255, NORM_MINMAX, CV_8UC1) - zImage.convertTo (zImage8, CV_8UC1); -#ifdef SHOW_GUI_WINDOWS - grayImage.convertTo (grayImage8, CV_8UC1); -#endif - - if (undistortImage) - { - // call the undistortion function on the z image - Mat temp = zImage8.clone(); - undistort (temp, zImage8, cameraMatrix, distortionCoefficients); - } - - if (normblurImage) - { - auto temp = zImage8.clone(); - for ( int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2 ) - { - blur( temp, zImage8, Size( i, i ), Point(-1,-1) ); - } - } - - //// Debug: show column part of image - // Mat subimg = zImage8(Rect((zImage.cols/num_dist_columns_)*3,0, zImage.cols/num_dist_columns_ , zImage8.rows)); - // imshow ("column", subimg); - - //detect column distance - auto col_width = zImage.cols/num_dist_columns_; - for (uint32_t col=0; colwidth * 4, data->height * 4), CV_8UC1); - // resize (zImage8, scaledZImage, scaledZImage.size()); - // imshow ("Depth", scaledZImage); - -#ifdef SHOW_GUI_WINDOWS - imshow ("Depth", zImage8); - - // scale and display the gray image - // scaledGrayImage.create (Size (data->width * 4, data->height * 4), CV_8UC1); - // resize (grayImage8, scaledGrayImage, scaledGrayImage.size()); - // imshow ("Gray", scaledGrayImage); - - // if (undistortImage) - // { - // // call the undistortion function on the gray image - // Mat temp = grayImage8.clone(); - // undistort (temp, grayImage8, cameraMatrix, distortionCoefficients); - // } - - imshow ("Confidence", confidenceImage); -#endif - } - - void setLensParameters (const LensParameters &lensParameters) - { - // Construct the camera matrix - // (fx 0 cx) - // (0 fy cy) - // (0 0 1 ) - cameraMatrix = (Mat1d (3, 3) << lensParameters.focalLength.first, 0, lensParameters.principalPoint.first, - 0, lensParameters.focalLength.second, lensParameters.principalPoint.second, - 0, 0, 1); - - // Construct the distortion coefficients - // k1 k2 p1 p2 k3 - distortionCoefficients = (Mat1d (1, 5) << lensParameters.distortionRadial[0], - lensParameters.distortionRadial[1], - lensParameters.distortionTangential.first, - lensParameters.distortionTangential.second, - lensParameters.distortionRadial[2]); - } - - void toggleUndistort() - { - std::lock_guard lock (flagMutex); - undistortImage = !undistortImage; - } - - void toggleNormBlur() - { - std::lock_guard lock (flagMutex); - normblurImage = !normblurImage; - } - -private: - - // adjust z value to fit fixed scaling, here max dist is 2.5m - // the max dist here is used as an example and can be modified - float adjustZValue (float zValue) - { - float clampedDist = std::min (2.5f, zValue); - float newZValue = clampedDist / 2.5f * 255.0f; - return newZValue; - } - - // adjust gray value to fit fixed scaling, here max value is 180 - // the max value here is used as an example and can be modified - float adjustGrayValue (uint16_t grayValue) - { - float clampedVal = std::min (180.0f, grayValue * 1.0f); - float newGrayValue = clampedVal / 180.f * 255.0f; - return newGrayValue; - } - - bool naiveIsObstaclePresent(bool free_paths[num_dist_columns_]) - { - bool rv=false; - for (uint32_t col=0; col distance_threshold_; - if (!free_paths[col]) - { - rv=true; - } - } - return rv; - } - - void naiveObstacleAvoidanceDemo() - { - bool free_paths[num_dist_columns_]; - if (naiveIsObstaclePresent(free_paths)) - { - assert(num_dist_columns_ == 4); - if (free_paths[1] && free_paths[2]) - { - 
std::cout << "GOSTRAIGHT" << std::endl; - } else if (free_paths[0]) - { - std::cout << "GOLEFT" << std::endl; - } else if (free_paths[3]) - { - std::cout << "GORIGHT" << std::endl; - } else { - std::cout << "STOP" << std::endl; - } - } - } - - // define images for depth and gray - // and for their 8Bit and scaled versions - Mat zImage, zImage8, scaledZImage; - Mat grayImage, grayImage8, scaledGrayImage; - Mat confidenceImage; - - // lens matrices used for the undistortion of - // the image - Mat cameraMatrix; - Mat distortionCoefficients; - - std::mutex flagMutex; - bool undistortImage; -}; - -int main (int argc, char *argv[]) -{ - // Windows requires that the application allocate these, not the DLL. - PlatformResources resources; - - // This is the data listener which will receive callbacks. It's declared - // before the cameraDevice so that, if this function exits with a 'return' - // statement while the camera is still capturing, it will still be in scope - // until the cameraDevice's destructor implicitly de-registers the listener. - MyListener listener; - - // this represents the main camera device object - std::unique_ptr cameraDevice; - - // the camera manager will query for a connected camera - { - CameraManager manager; - - // check the number of arguments - if (argc > 1) - { - // if the program was called with an argument try to open this as a file - cout << "Trying to open : " << argv[1] << endl; - cameraDevice = manager.createCamera (argv[1]); - } - else - { - // if no argument was given try to open the first connected camera - royale::Vector camlist (manager.getConnectedCameraList()); - cout << "Detected " << camlist.size() << " camera(s)." << endl; - - if (!camlist.empty()) - { - cameraDevice = manager.createCamera (camlist[0]); - } - else - { - cerr << "No suitable camera device detected." 
<< endl - << "Please make sure that a supported camera is plugged in, all drivers are " - << "installed, and you have proper USB permission" << endl; - return 1; - } - - camlist.clear(); - } - } - // the camera device is now available and CameraManager can be deallocated here - - if (cameraDevice == nullptr) - { - // no cameraDevice available - if (argc > 1) - { - cerr << "Could not open " << argv[1] << endl; - return 1; - } - else - { - cerr << "Cannot create the camera device" << endl; - return 1; - } - } - - // IMPORTANT: call the initialize method before working with the camera device - auto status = cameraDevice->initialize(); - if (status != CameraStatus::SUCCESS) - { - cerr << "Cannot initialize the camera device, error string : " << getErrorString (status) << endl; - return 1; - } - - // retrieve the lens parameters from Royale - LensParameters lensParameters; - status = cameraDevice->getLensParameters (lensParameters); - if (status != CameraStatus::SUCCESS) - { - cerr << "Can't read out the lens parameters" << endl; - return 1; - } - - listener.setLensParameters (lensParameters); - - // register a data listener - if (cameraDevice->registerDataListener (&listener) != CameraStatus::SUCCESS) - { - cerr << "Error registering data listener" << endl; - return 1; - } - -#ifdef SHOW_GUI_WINDOWS - // create two windows - namedWindow ("Depth", WINDOW_AUTOSIZE); - // namedWindow ("Gray", WINDOW_AUTOSIZE); - namedWindow ("Confidence", WINDOW_AUTOSIZE); - // namedWindow ("column", WINDOW_AUTOSIZE); -#endif - - - // set an operation mode - if (cameraDevice->setUseCase ("MODE_5_45FPS_500") != royale::CameraStatus::SUCCESS) - { - cerr << "Error setting use case" << endl; - return 1; - } - - // start capture mode - if (cameraDevice->startCapture() != CameraStatus::SUCCESS) - { - cerr << "Error starting the capturing" << endl; - return 1; - } - - - // Change the exposure time for the first stream of the use case (Royale will limit this to an - // eye-safe exposure time, with limits defined by the use case). The time is given in - // microseconds. - // - // Non-mixed mode use cases have exactly one stream, mixed mode use cases have more than one. - // For this example we only change the first stream. - // if (cameraDevice->setExposureTime (200, streamIds[0]) != royale::CameraStatus::SUCCESS) - // { - // cerr << "Cannot set exposure time for stream" << streamIds[0] << endl; - // } - // else - // { - // cout << "Changed exposure time for stream " << streamIds[0] << " to 200 microseconds ..." << endl; - // } - - - int currentKey = 0; - - while (currentKey != 27) - { - // wait until a key is pressed - currentKey = waitKey (0) & 255; - - if (currentKey == 'd') - { - // toggle the undistortion of the image - listener.toggleUndistort(); - } - - if (currentKey == 'b') - { - // toggle the undistortion of the image - listener.toggleNormBlur(); - } - } - - // stop capture mode - if (cameraDevice->stopCapture() != CameraStatus::SUCCESS) - { - cerr << "Error stopping the capturing" << endl; - return 1; - } - - return 0; -} -- 1.7.10.4
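
Note (not part of the patch itself): the core of the renamed example is the per-column
minimum-distance check that feeds naiveObstacleAvoidanceDemo(). Below is a minimal
standalone sketch of that idea, using only OpenCV. It assumes the per-column minimum is
read from the scaled 8-bit depth image (zImage8, where 255 means "2.5 m or farther" and
low-confidence pixels are forced to 255), which is what the commented-out debug code in
onNewData() suggests; the synthetic input Mat, its 224x171 size and the main() wrapper
are only placeholders.

    #include <opencv2/opencv.hpp>
    #include <iostream>

    int main()
    {
        const int    numColumns = 4;    // mirrors num_dist_columns_
        const double threshold  = 80.0; // mirrors distance_threshold_ (roughly 0.8 m on the 0..255 scale)

        // placeholder input: in the sample this would be zImage8 after blurring
        cv::Mat depth8 (171, 224, CV_8UC1, cv::Scalar (255));

        bool freePath[numColumns];
        bool obstaclePresent = false;
        int  colWidth = depth8.cols / numColumns;

        for (int col = 0; col < numColumns; col++)
        {
            // take one vertical strip of the depth image
            cv::Mat strip = depth8 (cv::Rect (colWidth * col, 0, colWidth, depth8.rows));

            // the closest pixel in the strip decides whether this path is free
            double minVal, maxVal;
            cv::minMaxLoc (strip, &minVal, &maxVal);
            freePath[col] = minVal > threshold;
            if (!freePath[col])
            {
                obstaclePresent = true;
            }
        }

        // same decision order as naiveObstacleAvoidanceDemo(): only react when at
        // least one column is blocked; prefer straight, then left, then right
        if (obstaclePresent)
        {
            if (freePath[1] && freePath[2])
            {
                std::cout << "GOSTRAIGHT" << std::endl;
            }
            else if (freePath[0])
            {
                std::cout << "GOLEFT" << std::endl;
            }
            else if (freePath[3])
            {
                std::cout << "GORIGHT" << std::endl;
            }
            else
            {
                std::cout << "STOP" << std::endl;
            }
        }
        return 0;
    }

In the sample this logic presumably runs once per depth frame from onNewData(), so a
direction (or STOP) is printed whenever any column reports a close obstacle.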