修改链接opencv版本为3410版本

This commit is contained in:
lovelyyoung 2021-08-04 11:44:12 +08:00
parent 7f9ec7e331
commit a577af2d56
130 changed files with 10764 additions and 21831 deletions

View File

@ -101,8 +101,7 @@
<SubSystem>Windows</SubSystem> <SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation> <GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalLibraryDirectories>.\pub\external\lib;.\pub\opencv\lib;D:\local\boost_1_71_0_b1_rc1\lib32-msvc-14.1</AdditionalLibraryDirectories> <AdditionalLibraryDirectories>.\pub\external\lib;.\pub\opencv\lib;D:\local\boost_1_71_0_b1_rc1\lib32-msvc-14.1</AdditionalLibraryDirectories>
<AdditionalDependencies>turbojpeg.lib;opencv_core2410d.lib;opencv_highgui2410d.lib; <AdditionalDependencies>turbojpeg.lib;opencv_world346d.lib;%(AdditionalDependencies)</AdditionalDependencies>
opencv_imgproc2410d.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link> </Link>
<Midl> <Midl>
<MkTypLibCompatible>false</MkTypLibCompatible> <MkTypLibCompatible>false</MkTypLibCompatible>
@ -160,8 +159,7 @@ opencv_imgproc2410d.lib;%(AdditionalDependencies)</AdditionalDependencies>
<EnableCOMDATFolding>true</EnableCOMDATFolding> <EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences> <OptimizeReferences>true</OptimizeReferences>
<AdditionalLibraryDirectories>.\pub\external\lib;.\pub\opencv\lib;D:\local\boost_1_71_0_b1_rc1\lib32-msvc-14.1</AdditionalLibraryDirectories> <AdditionalLibraryDirectories>.\pub\external\lib;.\pub\opencv\lib;D:\local\boost_1_71_0_b1_rc1\lib32-msvc-14.1</AdditionalLibraryDirectories>
<AdditionalDependencies>turbojpeg.lib;opencv_core2410.lib;opencv_highgui2410.lib; <AdditionalDependencies>turbojpeg.lib;opencv_world346.lib;%(AdditionalDependencies)</AdditionalDependencies>
opencv_imgproc2410.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link> </Link>
<Midl> <Midl>
<MkTypLibCompatible>false</MkTypLibCompatible> <MkTypLibCompatible>false</MkTypLibCompatible>

Binary file not shown.

View File

@ -40,8 +40,8 @@
// //
//M*/ //M*/
#ifndef __OPENCV_OLD_CV_H__ #ifndef OPENCV_OLD_CV_H
#define __OPENCV_OLD_CV_H__ #define OPENCV_OLD_CV_H
#if defined(_MSC_VER) #if defined(_MSC_VER)
#define CV_DO_PRAGMA(x) __pragma(x) #define CV_DO_PRAGMA(x) __pragma(x)
@ -61,22 +61,13 @@
//CV_WARNING("This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module") //CV_WARNING("This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module")
#include "opencv2/core/core_c.h" #include "opencv2/core/core_c.h"
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc_c.h" #include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/imgproc/imgproc.hpp" #include "opencv2/photo/photo_c.h"
#include "opencv2/video/tracking.hpp" #include "opencv2/video/tracking_c.h"
#include "opencv2/features2d/features2d.hpp" #include "opencv2/objdetect/objdetect_c.h"
#include "opencv2/flann/flann.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/legacy/compat.hpp"
#if !defined(CV_IMPL) #if !defined(CV_IMPL)
#define CV_IMPL extern "C" #define CV_IMPL extern "C"
#endif //CV_IMPL #endif //CV_IMPL
#if defined(__cplusplus)
#include "opencv2/core/internal.hpp"
#endif //__cplusplus
#endif // __OPENCV_OLD_CV_H_ #endif // __OPENCV_OLD_CV_H_

View File

@ -40,13 +40,21 @@
// //
//M*/ //M*/
#ifndef __OPENCV_OLD_CV_HPP__ #ifndef OPENCV_OLD_CV_HPP
#define __OPENCV_OLD_CV_HPP__ #define OPENCV_OLD_CV_HPP
//#if defined(__GNUC__) //#if defined(__GNUC__)
//#warning "This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module" //#warning "This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module"
//#endif //#endif
#include <cv.h> #include "cv.h"
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/photo.hpp"
#include "opencv2/video.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/objdetect.hpp"
#endif #endif

View File

@ -39,26 +39,18 @@
// //
//M*/ //M*/
#ifndef __OPENCV_OLD_AUX_H__ #ifndef OPENCV_OLD_AUX_H
#define __OPENCV_OLD_AUX_H__ #define OPENCV_OLD_AUX_H
//#if defined(__GNUC__) //#if defined(__GNUC__)
//#warning "This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module" //#warning "This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module"
//#endif //#endif
#include "opencv2/core/core_c.h" #include "opencv2/core/core_c.h"
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc_c.h" #include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/imgproc/imgproc.hpp" #include "opencv2/photo/photo_c.h"
#include "opencv2/video/tracking.hpp" #include "opencv2/video/tracking_c.h"
#include "opencv2/video/background_segm.hpp" #include "opencv2/objdetect/objdetect_c.h"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/legacy/legacy.hpp"
#include "opencv2/legacy/compat.hpp"
#include "opencv2/legacy/blobtrack.hpp"
#include "opencv2/contrib/contrib.hpp"
#endif #endif

View File

@ -39,13 +39,14 @@
// //
//M*/ //M*/
#ifndef __OPENCV_OLD_AUX_HPP__ #ifndef OPENCV_OLD_AUX_HPP
#define __OPENCV_OLD_AUX_HPP__ #define OPENCV_OLD_AUX_HPP
//#if defined(__GNUC__) //#if defined(__GNUC__)
//#warning "This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module" //#warning "This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module"
//#endif //#endif
#include <cvaux.h> #include "cvaux.h"
#include "opencv2/core/utility.hpp"
#endif #endif

View File

@ -38,8 +38,8 @@
// the use of this software, even if advised of the possibility of such damage. // the use of this software, even if advised of the possibility of such damage.
#ifndef __OPENCV_OLD_WIMAGE_HPP__ #ifndef OPENCV_OLD_WIMAGE_HPP
#define __OPENCV_OLD_WIMAGE_HPP__ #define OPENCV_OLD_WIMAGE_HPP
#include "opencv2/core/wimage.hpp" #include "opencv2/core/wimage.hpp"

View File

@ -40,14 +40,13 @@
// //
//M*/ //M*/
#ifndef __OPENCV_OLD_CXCORE_H__ #ifndef OPENCV_OLD_CXCORE_H
#define __OPENCV_OLD_CXCORE_H__ #define OPENCV_OLD_CXCORE_H
//#if defined(__GNUC__) //#if defined(__GNUC__)
//#warning "This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module" //#warning "This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module"
//#endif //#endif
#include "opencv2/core/core_c.h" #include "opencv2/core/core_c.h"
#include "opencv2/core/core.hpp"
#endif #endif

View File

@ -40,13 +40,14 @@
// //
//M*/ //M*/
#ifndef __OPENCV_OLD_CXCORE_HPP__ #ifndef OPENCV_OLD_CXCORE_HPP
#define __OPENCV_OLD_CXCORE_HPP__ #define OPENCV_OLD_CXCORE_HPP
//#if defined(__GNUC__) //#if defined(__GNUC__)
//#warning "This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module" //#warning "This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module"
//#endif //#endif
#include <cxcore.h> #include "cxcore.h"
#include "opencv2/core.hpp"
#endif #endif

View File

@ -40,8 +40,8 @@
// //
//M*/ //M*/
#ifndef __OPENCV_OLD_EIGEN_HPP__ #ifndef OPENCV_OLD_EIGEN_HPP
#define __OPENCV_OLD_EIGEN_HPP__ #define OPENCV_OLD_EIGEN_HPP
#include "opencv2/core/eigen.hpp" #include "opencv2/core/eigen.hpp"

View File

@ -1,6 +1,8 @@
#ifndef __OPENCV_OLD_CXMISC_H__ #ifndef OPENCV_OLD_CXMISC_H
#define __OPENCV_OLD_CXMISC_H__ #define OPENCV_OLD_CXMISC_H
#include "opencv2/core/internal.hpp" #ifdef __cplusplus
# include "opencv2/core/utility.hpp"
#endif
#endif #endif

View File

@ -39,12 +39,10 @@
// //
//M*/ //M*/
#ifndef __OPENCV_OLD_HIGHGUI_H__ #ifndef OPENCV_OLD_HIGHGUI_H
#define __OPENCV_OLD_HIGHGUI_H__ #define OPENCV_OLD_HIGHGUI_H
#include "opencv2/core/core_c.h" #include "opencv2/core/core_c.h"
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui_c.h" #include "opencv2/highgui/highgui_c.h"
#include "opencv2/highgui/highgui.hpp"
#endif #endif

View File

@ -38,11 +38,10 @@
// //
//M*/ //M*/
#ifndef __OPENCV_OLD_ML_H__ #ifndef OPENCV_OLD_ML_H
#define __OPENCV_OLD_ML_H__ #define OPENCV_OLD_ML_H
#include "opencv2/core/core_c.h" #include "opencv2/core/core_c.h"
#include "opencv2/core/core.hpp" #include "opencv2/ml.hpp"
#include "opencv2/ml/ml.hpp"
#endif #endif

View File

@ -7,11 +7,12 @@
// copy or use the software. // copy or use the software.
// //
// //
// License Agreement // License Agreement
// For Open Source Computer Vision Library // For Open Source Computer Vision Library
// //
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// //
// Redistribution and use in source and binary forms, with or without modification, // Redistribution and use in source and binary forms, with or without modification,
@ -40,772 +41,8 @@
// //
//M*/ //M*/
#ifndef __OPENCV_CALIB3D_HPP__ #ifdef __OPENCV_BUILD
#define __OPENCV_CALIB3D_HPP__ #error this is a compatibility header which should not be used inside the OpenCV library
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/core/affine.hpp"
#ifdef __cplusplus
extern "C" {
#endif #endif
/****************************************************************************************\ #include "opencv2/calib3d.hpp"
* Camera Calibration, Pose Estimation and Stereo *
\****************************************************************************************/
typedef struct CvPOSITObject CvPOSITObject;
/* Allocates and initializes CvPOSITObject structure before doing cvPOSIT */
CVAPI(CvPOSITObject*) cvCreatePOSITObject( CvPoint3D32f* points, int point_count );
/* Runs POSIT (POSe from ITeration) algorithm for determining 3d position of
an object given its model and projection in a weak-perspective case */
CVAPI(void) cvPOSIT( CvPOSITObject* posit_object, CvPoint2D32f* image_points,
double focal_length, CvTermCriteria criteria,
float* rotation_matrix, float* translation_vector);
/* Releases CvPOSITObject structure */
CVAPI(void) cvReleasePOSITObject( CvPOSITObject** posit_object );
/* updates the number of RANSAC iterations */
CVAPI(int) cvRANSACUpdateNumIters( double p, double err_prob,
int model_points, int max_iters );
CVAPI(void) cvConvertPointsHomogeneous( const CvMat* src, CvMat* dst );
/* Calculates fundamental matrix given a set of corresponding points */
#define CV_FM_7POINT 1
#define CV_FM_8POINT 2
#define CV_LMEDS 4
#define CV_RANSAC 8
#define CV_FM_LMEDS_ONLY CV_LMEDS
#define CV_FM_RANSAC_ONLY CV_RANSAC
#define CV_FM_LMEDS CV_LMEDS
#define CV_FM_RANSAC CV_RANSAC
enum
{
CV_ITERATIVE = 0,
CV_EPNP = 1, // F.Moreno-Noguer, V.Lepetit and P.Fua "EPnP: Efficient Perspective-n-Point Camera Pose Estimation"
CV_P3P = 2 // X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang; "Complete Solution Classification for the Perspective-Three-Point Problem"
};
CVAPI(int) cvFindFundamentalMat( const CvMat* points1, const CvMat* points2,
CvMat* fundamental_matrix,
int method CV_DEFAULT(CV_FM_RANSAC),
double param1 CV_DEFAULT(3.), double param2 CV_DEFAULT(0.99),
CvMat* status CV_DEFAULT(NULL) );
/* For each input point on one of images
computes parameters of the corresponding
epipolar line on the other image */
CVAPI(void) cvComputeCorrespondEpilines( const CvMat* points,
int which_image,
const CvMat* fundamental_matrix,
CvMat* correspondent_lines );
/* Triangulation functions */
CVAPI(void) cvTriangulatePoints(CvMat* projMatr1, CvMat* projMatr2,
CvMat* projPoints1, CvMat* projPoints2,
CvMat* points4D);
CVAPI(void) cvCorrectMatches(CvMat* F, CvMat* points1, CvMat* points2,
CvMat* new_points1, CvMat* new_points2);
/* Computes the optimal new camera matrix according to the free scaling parameter alpha:
alpha=0 - only valid pixels will be retained in the undistorted image
alpha=1 - all the source image pixels will be retained in the undistorted image
*/
CVAPI(void) cvGetOptimalNewCameraMatrix( const CvMat* camera_matrix,
const CvMat* dist_coeffs,
CvSize image_size, double alpha,
CvMat* new_camera_matrix,
CvSize new_imag_size CV_DEFAULT(cvSize(0,0)),
CvRect* valid_pixel_ROI CV_DEFAULT(0),
int center_principal_point CV_DEFAULT(0));
/* Converts rotation vector to rotation matrix or vice versa */
CVAPI(int) cvRodrigues2( const CvMat* src, CvMat* dst,
CvMat* jacobian CV_DEFAULT(0) );
/* Finds perspective transformation between the object plane and image (view) plane */
CVAPI(int) cvFindHomography( const CvMat* src_points,
const CvMat* dst_points,
CvMat* homography,
int method CV_DEFAULT(0),
double ransacReprojThreshold CV_DEFAULT(3),
CvMat* mask CV_DEFAULT(0));
/* Computes RQ decomposition for 3x3 matrices */
CVAPI(void) cvRQDecomp3x3( const CvMat *matrixM, CvMat *matrixR, CvMat *matrixQ,
CvMat *matrixQx CV_DEFAULT(NULL),
CvMat *matrixQy CV_DEFAULT(NULL),
CvMat *matrixQz CV_DEFAULT(NULL),
CvPoint3D64f *eulerAngles CV_DEFAULT(NULL));
/* Computes projection matrix decomposition */
CVAPI(void) cvDecomposeProjectionMatrix( const CvMat *projMatr, CvMat *calibMatr,
CvMat *rotMatr, CvMat *posVect,
CvMat *rotMatrX CV_DEFAULT(NULL),
CvMat *rotMatrY CV_DEFAULT(NULL),
CvMat *rotMatrZ CV_DEFAULT(NULL),
CvPoint3D64f *eulerAngles CV_DEFAULT(NULL));
/* Computes d(AB)/dA and d(AB)/dB */
CVAPI(void) cvCalcMatMulDeriv( const CvMat* A, const CvMat* B, CvMat* dABdA, CvMat* dABdB );
/* Computes r3 = rodrigues(rodrigues(r2)*rodrigues(r1)),
t3 = rodrigues(r2)*t1 + t2 and the respective derivatives */
CVAPI(void) cvComposeRT( const CvMat* _rvec1, const CvMat* _tvec1,
const CvMat* _rvec2, const CvMat* _tvec2,
CvMat* _rvec3, CvMat* _tvec3,
CvMat* dr3dr1 CV_DEFAULT(0), CvMat* dr3dt1 CV_DEFAULT(0),
CvMat* dr3dr2 CV_DEFAULT(0), CvMat* dr3dt2 CV_DEFAULT(0),
CvMat* dt3dr1 CV_DEFAULT(0), CvMat* dt3dt1 CV_DEFAULT(0),
CvMat* dt3dr2 CV_DEFAULT(0), CvMat* dt3dt2 CV_DEFAULT(0) );
/* Projects object points to the view plane using
the specified extrinsic and intrinsic camera parameters */
CVAPI(void) cvProjectPoints2( const CvMat* object_points, const CvMat* rotation_vector,
const CvMat* translation_vector, const CvMat* camera_matrix,
const CvMat* distortion_coeffs, CvMat* image_points,
CvMat* dpdrot CV_DEFAULT(NULL), CvMat* dpdt CV_DEFAULT(NULL),
CvMat* dpdf CV_DEFAULT(NULL), CvMat* dpdc CV_DEFAULT(NULL),
CvMat* dpddist CV_DEFAULT(NULL),
double aspect_ratio CV_DEFAULT(0));
/* Finds extrinsic camera parameters from
a few known corresponding point pairs and intrinsic parameters */
CVAPI(void) cvFindExtrinsicCameraParams2( const CvMat* object_points,
const CvMat* image_points,
const CvMat* camera_matrix,
const CvMat* distortion_coeffs,
CvMat* rotation_vector,
CvMat* translation_vector,
int use_extrinsic_guess CV_DEFAULT(0) );
/* Computes initial estimate of the intrinsic camera parameters
in case of planar calibration target (e.g. chessboard) */
CVAPI(void) cvInitIntrinsicParams2D( const CvMat* object_points,
const CvMat* image_points,
const CvMat* npoints, CvSize image_size,
CvMat* camera_matrix,
double aspect_ratio CV_DEFAULT(1.) );
#define CV_CALIB_CB_ADAPTIVE_THRESH 1
#define CV_CALIB_CB_NORMALIZE_IMAGE 2
#define CV_CALIB_CB_FILTER_QUADS 4
#define CV_CALIB_CB_FAST_CHECK 8
// Performs a fast check if a chessboard is in the input image. This is a workaround to
// a problem of cvFindChessboardCorners being slow on images with no chessboard
// - src: input image
// - size: chessboard size
// Returns 1 if a chessboard can be in this image and findChessboardCorners should be called,
// 0 if there is no chessboard, -1 in case of error
CVAPI(int) cvCheckChessboard(IplImage* src, CvSize size);
/* Detects corners on a chessboard calibration pattern */
CVAPI(int) cvFindChessboardCorners( const void* image, CvSize pattern_size,
CvPoint2D32f* corners,
int* corner_count CV_DEFAULT(NULL),
int flags CV_DEFAULT(CV_CALIB_CB_ADAPTIVE_THRESH+CV_CALIB_CB_NORMALIZE_IMAGE) );
/* Draws individual chessboard corners or the whole chessboard detected */
CVAPI(void) cvDrawChessboardCorners( CvArr* image, CvSize pattern_size,
CvPoint2D32f* corners,
int count, int pattern_was_found );
#define CV_CALIB_USE_INTRINSIC_GUESS 1
#define CV_CALIB_FIX_ASPECT_RATIO 2
#define CV_CALIB_FIX_PRINCIPAL_POINT 4
#define CV_CALIB_ZERO_TANGENT_DIST 8
#define CV_CALIB_FIX_FOCAL_LENGTH 16
#define CV_CALIB_FIX_K1 32
#define CV_CALIB_FIX_K2 64
#define CV_CALIB_FIX_K3 128
#define CV_CALIB_FIX_K4 2048
#define CV_CALIB_FIX_K5 4096
#define CV_CALIB_FIX_K6 8192
#define CV_CALIB_RATIONAL_MODEL 16384
/* Finds intrinsic and extrinsic camera parameters
from a few views of known calibration pattern */
CVAPI(double) cvCalibrateCamera2( const CvMat* object_points,
const CvMat* image_points,
const CvMat* point_counts,
CvSize image_size,
CvMat* camera_matrix,
CvMat* distortion_coeffs,
CvMat* rotation_vectors CV_DEFAULT(NULL),
CvMat* translation_vectors CV_DEFAULT(NULL),
int flags CV_DEFAULT(0),
CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria(
CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON)) );
/* Computes various useful characteristics of the camera from the data computed by
cvCalibrateCamera2 */
CVAPI(void) cvCalibrationMatrixValues( const CvMat *camera_matrix,
CvSize image_size,
double aperture_width CV_DEFAULT(0),
double aperture_height CV_DEFAULT(0),
double *fovx CV_DEFAULT(NULL),
double *fovy CV_DEFAULT(NULL),
double *focal_length CV_DEFAULT(NULL),
CvPoint2D64f *principal_point CV_DEFAULT(NULL),
double *pixel_aspect_ratio CV_DEFAULT(NULL));
#define CV_CALIB_FIX_INTRINSIC 256
#define CV_CALIB_SAME_FOCAL_LENGTH 512
/* Computes the transformation from one camera coordinate system to another one
from a few correspondent views of the same calibration target. Optionally, calibrates
both cameras */
CVAPI(double) cvStereoCalibrate( const CvMat* object_points, const CvMat* image_points1,
const CvMat* image_points2, const CvMat* npoints,
CvMat* camera_matrix1, CvMat* dist_coeffs1,
CvMat* camera_matrix2, CvMat* dist_coeffs2,
CvSize image_size, CvMat* R, CvMat* T,
CvMat* E CV_DEFAULT(0), CvMat* F CV_DEFAULT(0),
CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria(
CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,1e-6)),
int flags CV_DEFAULT(CV_CALIB_FIX_INTRINSIC));
#define CV_CALIB_ZERO_DISPARITY 1024
/* Computes 3D rotations (+ optional shift) for each camera coordinate system to make both
views parallel (=> to make all the epipolar lines horizontal or vertical) */
CVAPI(void) cvStereoRectify( const CvMat* camera_matrix1, const CvMat* camera_matrix2,
const CvMat* dist_coeffs1, const CvMat* dist_coeffs2,
CvSize image_size, const CvMat* R, const CvMat* T,
CvMat* R1, CvMat* R2, CvMat* P1, CvMat* P2,
CvMat* Q CV_DEFAULT(0),
int flags CV_DEFAULT(CV_CALIB_ZERO_DISPARITY),
double alpha CV_DEFAULT(-1),
CvSize new_image_size CV_DEFAULT(cvSize(0,0)),
CvRect* valid_pix_ROI1 CV_DEFAULT(0),
CvRect* valid_pix_ROI2 CV_DEFAULT(0));
/* Computes rectification transformations for uncalibrated pair of images using a set
of point correspondences */
CVAPI(int) cvStereoRectifyUncalibrated( const CvMat* points1, const CvMat* points2,
const CvMat* F, CvSize img_size,
CvMat* H1, CvMat* H2,
double threshold CV_DEFAULT(5));
/* stereo correspondence parameters and functions */
#define CV_STEREO_BM_NORMALIZED_RESPONSE 0
#define CV_STEREO_BM_XSOBEL 1
/* Block matching algorithm structure */
typedef struct CvStereoBMState
{
// pre-filtering (normalization of input images)
int preFilterType; // =CV_STEREO_BM_NORMALIZED_RESPONSE now
int preFilterSize; // averaging window size: ~5x5..21x21
int preFilterCap; // the output of pre-filtering is clipped by [-preFilterCap,preFilterCap]
// correspondence using Sum of Absolute Difference (SAD)
int SADWindowSize; // ~5x5..21x21
int minDisparity; // minimum disparity (can be negative)
int numberOfDisparities; // maximum disparity - minimum disparity (> 0)
// post-filtering
int textureThreshold; // the disparity is only computed for pixels
// with textured enough neighborhood
int uniquenessRatio; // accept the computed disparity d* only if
// SAD(d) >= SAD(d*)*(1 + uniquenessRatio/100.)
// for any d != d*+/-1 within the search range.
int speckleWindowSize; // disparity variation window
int speckleRange; // acceptable range of variation in window
int trySmallerWindows; // if 1, the results may be more accurate,
// at the expense of slower processing
CvRect roi1, roi2;
int disp12MaxDiff;
// temporary buffers
CvMat* preFilteredImg0;
CvMat* preFilteredImg1;
CvMat* slidingSumBuf;
CvMat* cost;
CvMat* disp;
} CvStereoBMState;
#define CV_STEREO_BM_BASIC 0
#define CV_STEREO_BM_FISH_EYE 1
#define CV_STEREO_BM_NARROW 2
CVAPI(CvStereoBMState*) cvCreateStereoBMState(int preset CV_DEFAULT(CV_STEREO_BM_BASIC),
int numberOfDisparities CV_DEFAULT(0));
CVAPI(void) cvReleaseStereoBMState( CvStereoBMState** state );
CVAPI(void) cvFindStereoCorrespondenceBM( const CvArr* left, const CvArr* right,
CvArr* disparity, CvStereoBMState* state );
CVAPI(CvRect) cvGetValidDisparityROI( CvRect roi1, CvRect roi2, int minDisparity,
int numberOfDisparities, int SADWindowSize );
CVAPI(void) cvValidateDisparity( CvArr* disparity, const CvArr* cost,
int minDisparity, int numberOfDisparities,
int disp12MaxDiff CV_DEFAULT(1) );
/* Reprojects the computed disparity image to the 3D space using the specified 4x4 matrix */
CVAPI(void) cvReprojectImageTo3D( const CvArr* disparityImage,
CvArr* _3dImage, const CvMat* Q,
int handleMissingValues CV_DEFAULT(0) );
#ifdef __cplusplus
}
//////////////////////////////////////////////////////////////////////////////////////////
class CV_EXPORTS CvLevMarq
{
public:
CvLevMarq();
CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria=
cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
bool completeSymmFlag=false );
~CvLevMarq();
void init( int nparams, int nerrs, CvTermCriteria criteria=
cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
bool completeSymmFlag=false );
bool update( const CvMat*& param, CvMat*& J, CvMat*& err );
bool updateAlt( const CvMat*& param, CvMat*& JtJ, CvMat*& JtErr, double*& errNorm );
void clear();
void step();
enum { DONE=0, STARTED=1, CALC_J=2, CHECK_ERR=3 };
cv::Ptr<CvMat> mask;
cv::Ptr<CvMat> prevParam;
cv::Ptr<CvMat> param;
cv::Ptr<CvMat> J;
cv::Ptr<CvMat> err;
cv::Ptr<CvMat> JtJ;
cv::Ptr<CvMat> JtJN;
cv::Ptr<CvMat> JtErr;
cv::Ptr<CvMat> JtJV;
cv::Ptr<CvMat> JtJW;
double prevErrNorm, errNorm;
int lambdaLg10;
CvTermCriteria criteria;
int state;
int iters;
bool completeSymmFlag;
};
namespace cv
{
//! converts rotation vector to rotation matrix or vice versa using Rodrigues transformation
CV_EXPORTS_W void Rodrigues(InputArray src, OutputArray dst, OutputArray jacobian=noArray());
//! type of the robust estimation algorithm
enum
{
LMEDS=CV_LMEDS, //!< least-median algorithm
RANSAC=CV_RANSAC //!< RANSAC algorithm
};
//! computes the best-fit perspective transformation mapping srcPoints to dstPoints.
CV_EXPORTS_W Mat findHomography( InputArray srcPoints, InputArray dstPoints,
int method=0, double ransacReprojThreshold=3,
OutputArray mask=noArray());
//! variant of findHomography for backward compatibility
CV_EXPORTS Mat findHomography( InputArray srcPoints, InputArray dstPoints,
OutputArray mask, int method=0, double ransacReprojThreshold=3);
//! Computes RQ decomposition of 3x3 matrix
CV_EXPORTS_W Vec3d RQDecomp3x3( InputArray src, OutputArray mtxR, OutputArray mtxQ,
OutputArray Qx=noArray(),
OutputArray Qy=noArray(),
OutputArray Qz=noArray());
//! Decomposes the projection matrix into camera matrix and the rotation martix and the translation vector
CV_EXPORTS_W void decomposeProjectionMatrix( InputArray projMatrix, OutputArray cameraMatrix,
OutputArray rotMatrix, OutputArray transVect,
OutputArray rotMatrixX=noArray(),
OutputArray rotMatrixY=noArray(),
OutputArray rotMatrixZ=noArray(),
OutputArray eulerAngles=noArray() );
//! computes derivatives of the matrix product w.r.t each of the multiplied matrix coefficients
CV_EXPORTS_W void matMulDeriv( InputArray A, InputArray B,
OutputArray dABdA,
OutputArray dABdB );
//! composes 2 [R|t] transformations together. Also computes the derivatives of the result w.r.t the arguments
CV_EXPORTS_W void composeRT( InputArray rvec1, InputArray tvec1,
InputArray rvec2, InputArray tvec2,
OutputArray rvec3, OutputArray tvec3,
OutputArray dr3dr1=noArray(), OutputArray dr3dt1=noArray(),
OutputArray dr3dr2=noArray(), OutputArray dr3dt2=noArray(),
OutputArray dt3dr1=noArray(), OutputArray dt3dt1=noArray(),
OutputArray dt3dr2=noArray(), OutputArray dt3dt2=noArray() );
//! projects points from the model coordinate space to the image coordinates. Also computes derivatives of the image coordinates w.r.t the intrinsic and extrinsic camera parameters
CV_EXPORTS_W void projectPoints( InputArray objectPoints,
InputArray rvec, InputArray tvec,
InputArray cameraMatrix, InputArray distCoeffs,
OutputArray imagePoints,
OutputArray jacobian=noArray(),
double aspectRatio=0 );
//! computes the camera pose from a few 3D points and the corresponding projections. The outliers are not handled.
enum
{
ITERATIVE=CV_ITERATIVE,
EPNP=CV_EPNP,
P3P=CV_P3P
};
CV_EXPORTS_W bool solvePnP( InputArray objectPoints, InputArray imagePoints,
InputArray cameraMatrix, InputArray distCoeffs,
OutputArray rvec, OutputArray tvec,
bool useExtrinsicGuess=false, int flags=ITERATIVE);
//! computes the camera pose from a few 3D points and the corresponding projections. The outliers are possible.
CV_EXPORTS_W void solvePnPRansac( InputArray objectPoints,
InputArray imagePoints,
InputArray cameraMatrix,
InputArray distCoeffs,
OutputArray rvec,
OutputArray tvec,
bool useExtrinsicGuess = false,
int iterationsCount = 100,
float reprojectionError = 8.0,
int minInliersCount = 100,
OutputArray inliers = noArray(),
int flags = ITERATIVE);
//! initializes camera matrix from a few 3D points and the corresponding projections.
CV_EXPORTS_W Mat initCameraMatrix2D( InputArrayOfArrays objectPoints,
InputArrayOfArrays imagePoints,
Size imageSize, double aspectRatio=1. );
enum { CALIB_CB_ADAPTIVE_THRESH = 1, CALIB_CB_NORMALIZE_IMAGE = 2,
CALIB_CB_FILTER_QUADS = 4, CALIB_CB_FAST_CHECK = 8 };
//! finds checkerboard pattern of the specified size in the image
CV_EXPORTS_W bool findChessboardCorners( InputArray image, Size patternSize,
OutputArray corners,
int flags=CALIB_CB_ADAPTIVE_THRESH+CALIB_CB_NORMALIZE_IMAGE );
//! finds subpixel-accurate positions of the chessboard corners
CV_EXPORTS bool find4QuadCornerSubpix(InputArray img, InputOutputArray corners, Size region_size);
//! draws the checkerboard pattern (found or partly found) in the image
CV_EXPORTS_W void drawChessboardCorners( InputOutputArray image, Size patternSize,
InputArray corners, bool patternWasFound );
enum { CALIB_CB_SYMMETRIC_GRID = 1, CALIB_CB_ASYMMETRIC_GRID = 2,
CALIB_CB_CLUSTERING = 4 };
//! finds circles' grid pattern of the specified size in the image
CV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize,
OutputArray centers, int flags=CALIB_CB_SYMMETRIC_GRID,
const Ptr<FeatureDetector> &blobDetector = new SimpleBlobDetector());
//! the deprecated function. Use findCirclesGrid() instead of it.
CV_EXPORTS_W bool findCirclesGridDefault( InputArray image, Size patternSize,
OutputArray centers, int flags=CALIB_CB_SYMMETRIC_GRID );
enum
{
CALIB_USE_INTRINSIC_GUESS = CV_CALIB_USE_INTRINSIC_GUESS,
CALIB_FIX_ASPECT_RATIO = CV_CALIB_FIX_ASPECT_RATIO,
CALIB_FIX_PRINCIPAL_POINT = CV_CALIB_FIX_PRINCIPAL_POINT,
CALIB_ZERO_TANGENT_DIST = CV_CALIB_ZERO_TANGENT_DIST,
CALIB_FIX_FOCAL_LENGTH = CV_CALIB_FIX_FOCAL_LENGTH,
CALIB_FIX_K1 = CV_CALIB_FIX_K1,
CALIB_FIX_K2 = CV_CALIB_FIX_K2,
CALIB_FIX_K3 = CV_CALIB_FIX_K3,
CALIB_FIX_K4 = CV_CALIB_FIX_K4,
CALIB_FIX_K5 = CV_CALIB_FIX_K5,
CALIB_FIX_K6 = CV_CALIB_FIX_K6,
CALIB_RATIONAL_MODEL = CV_CALIB_RATIONAL_MODEL,
// only for stereo
CALIB_FIX_INTRINSIC = CV_CALIB_FIX_INTRINSIC,
CALIB_SAME_FOCAL_LENGTH = CV_CALIB_SAME_FOCAL_LENGTH,
// for stereo rectification
CALIB_ZERO_DISPARITY = CV_CALIB_ZERO_DISPARITY
};
//! finds intrinsic and extrinsic camera parameters from several fews of a known calibration pattern.
CV_EXPORTS_W double calibrateCamera( InputArrayOfArrays objectPoints,
InputArrayOfArrays imagePoints,
Size imageSize,
CV_OUT InputOutputArray cameraMatrix,
CV_OUT InputOutputArray distCoeffs,
OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
int flags=0, TermCriteria criteria = TermCriteria(
TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON) );
//! computes several useful camera characteristics from the camera matrix, camera frame resolution and the physical sensor size.
CV_EXPORTS_W void calibrationMatrixValues( InputArray cameraMatrix,
Size imageSize,
double apertureWidth,
double apertureHeight,
CV_OUT double& fovx,
CV_OUT double& fovy,
CV_OUT double& focalLength,
CV_OUT Point2d& principalPoint,
CV_OUT double& aspectRatio );
//! finds intrinsic and extrinsic parameters of a stereo camera
CV_EXPORTS_W double stereoCalibrate( InputArrayOfArrays objectPoints,
InputArrayOfArrays imagePoints1,
InputArrayOfArrays imagePoints2,
CV_OUT InputOutputArray cameraMatrix1,
CV_OUT InputOutputArray distCoeffs1,
CV_OUT InputOutputArray cameraMatrix2,
CV_OUT InputOutputArray distCoeffs2,
Size imageSize, OutputArray R,
OutputArray T, OutputArray E, OutputArray F,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6),
int flags=CALIB_FIX_INTRINSIC );
//! computes the rectification transformation for a stereo camera from its intrinsic and extrinsic parameters
CV_EXPORTS_W void stereoRectify( InputArray cameraMatrix1, InputArray distCoeffs1,
InputArray cameraMatrix2, InputArray distCoeffs2,
Size imageSize, InputArray R, InputArray T,
OutputArray R1, OutputArray R2,
OutputArray P1, OutputArray P2,
OutputArray Q, int flags=CALIB_ZERO_DISPARITY,
double alpha=-1, Size newImageSize=Size(),
CV_OUT Rect* validPixROI1=0, CV_OUT Rect* validPixROI2=0 );
//! computes the rectification transformation for an uncalibrated stereo camera (zero distortion is assumed)
CV_EXPORTS_W bool stereoRectifyUncalibrated( InputArray points1, InputArray points2,
InputArray F, Size imgSize,
OutputArray H1, OutputArray H2,
double threshold=5 );
//! computes the rectification transformations for 3-head camera, where all the heads are on the same line.
CV_EXPORTS_W float rectify3Collinear( InputArray cameraMatrix1, InputArray distCoeffs1,
InputArray cameraMatrix2, InputArray distCoeffs2,
InputArray cameraMatrix3, InputArray distCoeffs3,
InputArrayOfArrays imgpt1, InputArrayOfArrays imgpt3,
Size imageSize, InputArray R12, InputArray T12,
InputArray R13, InputArray T13,
OutputArray R1, OutputArray R2, OutputArray R3,
OutputArray P1, OutputArray P2, OutputArray P3,
OutputArray Q, double alpha, Size newImgSize,
CV_OUT Rect* roi1, CV_OUT Rect* roi2, int flags );
//! returns the optimal new camera matrix
CV_EXPORTS_W Mat getOptimalNewCameraMatrix( InputArray cameraMatrix, InputArray distCoeffs,
Size imageSize, double alpha, Size newImgSize=Size(),
CV_OUT Rect* validPixROI=0, bool centerPrincipalPoint=false);
//! converts point coordinates from normal pixel coordinates to homogeneous coordinates ((x,y)->(x,y,1))
CV_EXPORTS_W void convertPointsToHomogeneous( InputArray src, OutputArray dst );

//! converts point coordinates from homogeneous to normal pixel coordinates ((x,y,z)->(x/z, y/z))
CV_EXPORTS_W void convertPointsFromHomogeneous( InputArray src, OutputArray dst );

//! for backward compatibility
//! Legacy entry point; prefer the explicit To/From variants above. It selects the
//! conversion direction from the layout of src (see OpenCV docs for the exact rule).
CV_EXPORTS void convertPointsHomogeneous( InputArray src, OutputArray dst );
//! the algorithm for finding fundamental matrix
//! Values alias the legacy CV_FM_* C-API constants so both spellings stay interchangeable.
enum
{
    FM_7POINT = CV_FM_7POINT, //!< 7-point algorithm (requires exactly 7 point pairs; may yield up to 3 solutions)
    FM_8POINT = CV_FM_8POINT, //!< 8-point algorithm (requires at least 8 point pairs)
    FM_LMEDS = CV_FM_LMEDS,   //!< least-median algorithm (robust, needs >50% inliers)
    FM_RANSAC = CV_FM_RANSAC  //!< RANSAC algorithm (robust to outliers; the default)
};
//! finds fundamental matrix from a set of corresponding 2D points
//! param1 is the maximum point-to-epipolar-line distance (pixels) for RANSAC inlier
//! classification; param2 is the desired confidence level (for RANSAC/LMedS).
//! When mask is provided it receives one inlier/outlier flag per input point.
CV_EXPORTS_W Mat findFundamentalMat( InputArray points1, InputArray points2,
                                     int method=FM_RANSAC,
                                     double param1=3., double param2=0.99,
                                     OutputArray mask=noArray());

//! variant of findFundamentalMat for backward compatibility
//! Same computation as above; only the parameter order differs (mask comes first).
CV_EXPORTS Mat findFundamentalMat( InputArray points1, InputArray points2,
                                   OutputArray mask, int method=FM_RANSAC,
                                   double param1=3., double param2=0.99);
//! finds coordinates of epipolar lines corresponding the specified points
//! whichImage (1 or 2) tells which image the input points belong to; each output line
//! is encoded as (a,b,c) with a*x + b*y + c = 0 in the other image.
CV_EXPORTS_W void computeCorrespondEpilines( InputArray points,
                                             int whichImage, InputArray F,
                                             OutputArray lines );

//! reconstructs 3D points (in homogeneous coordinates, hence 4 rows) by triangulating
//! observations projPoints1/projPoints2 seen through the 3x4 projection matrices
//! projMatr1/projMatr2.
CV_EXPORTS_W void triangulatePoints( InputArray projMatr1, InputArray projMatr2,
                                     InputArray projPoints1, InputArray projPoints2,
                                     OutputArray points4D );

//! refines matched point pairs so that they exactly satisfy the epipolar constraint
//! newPoints2^T * F * newPoints1 = 0 while minimally perturbing the inputs.
CV_EXPORTS_W void correctMatches( InputArray F, InputArray points1, InputArray points2,
                                  OutputArray newPoints1, OutputArray newPoints2 );
//! specialization so Ptr<> releases the legacy C-API state with the proper deallocator
template<> CV_EXPORTS void Ptr<CvStereoBMState>::delete_obj();

/*!
 Block Matching Stereo Correspondence Algorithm

 The class implements BM stereo correspondence algorithm by K. Konolige.
 It computes a disparity map for a rectified stereo pair using SAD-based
 block matching; state is kept in the wrapped legacy CvStereoBMState.
*/
class CV_EXPORTS_W StereoBM
{
public:
    //! prefilter types (first row) and camera-specific parameter presets (second row)
    enum { PREFILTER_NORMALIZED_RESPONSE = 0, PREFILTER_XSOBEL = 1,
        BASIC_PRESET=0, FISH_EYE_PRESET=1, NARROW_PRESET=2 };

    //! the default constructor
    CV_WRAP StereoBM();
    //! the full constructor taking the camera-specific preset, number of disparities and the SAD window size
    CV_WRAP StereoBM(int preset, int ndisparities=0, int SADWindowSize=21);
    //! the method that reinitializes the state. The previous content is destroyed
    void init(int preset, int ndisparities=0, int SADWindowSize=21);
    //! the stereo correspondence operator. Finds the disparity for the specified rectified stereo pair
    //! disptype selects the output depth (CV_16S fixed-point by default)
    CV_WRAP_AS(compute) void operator()( InputArray left, InputArray right,
                                         OutputArray disparity, int disptype=CV_16S );

    //! pointer to the underlying CvStereoBMState
    Ptr<CvStereoBMState> state;
};
/*!
 Semi-Global Block Matching Stereo Correspondence Algorithm

 The class implements the original SGBM stereo correspondence algorithm by H. Hirschmuller and some its modification.
 The computed disparity is a fixed-point value scaled by DISP_SCALE (16).
*/
class CV_EXPORTS_W StereoSGBM
{
public:
    //! disparities are returned shifted left by DISP_SHIFT bits (i.e. multiplied by DISP_SCALE)
    enum { DISP_SHIFT=4, DISP_SCALE = (1<<DISP_SHIFT) };

    //! the default constructor
    CV_WRAP StereoSGBM();

    //! the full constructor taking all the necessary algorithm parameters
    CV_WRAP StereoSGBM(int minDisparity, int numDisparities, int SADWindowSize,
                       int P1=0, int P2=0, int disp12MaxDiff=0,
                       int preFilterCap=0, int uniquenessRatio=0,
                       int speckleWindowSize=0, int speckleRange=0,
                       bool fullDP=false);
    //! the destructor
    virtual ~StereoSGBM();

    //! the stereo correspondence operator that computes disparity map for the specified rectified stereo pair
    CV_WRAP_AS(compute) virtual void operator()(InputArray left, InputArray right,
                                                OutputArray disp);

    CV_PROP_RW int minDisparity;          //!< minimum possible disparity value
    CV_PROP_RW int numberOfDisparities;   //!< disparity search range (must be divisible by 16 per OpenCV docs)
    CV_PROP_RW int SADWindowSize;         //!< matched block size (odd)
    CV_PROP_RW int preFilterCap;          //!< truncation value for the prefiltered image pixels
    CV_PROP_RW int uniquenessRatio;       //!< margin (%) by which the best cost must beat the second-best
    CV_PROP_RW int P1;                    //!< smoothness penalty for +/-1 disparity change between neighbors
    CV_PROP_RW int P2;                    //!< smoothness penalty for larger disparity changes (P2 > P1)
    CV_PROP_RW int speckleWindowSize;     //!< max size of smooth disparity regions to invalidate as speckles (0 disables)
    CV_PROP_RW int speckleRange;          //!< max disparity variation within a connected component
    CV_PROP_RW int disp12MaxDiff;         //!< max allowed difference in the left-right disparity check (negative disables)
    CV_PROP_RW bool fullDP;               //!< run the full two-pass dynamic programming variant (much more memory)

protected:
    Mat buffer;  // scratch storage reused between operator() calls
};
//! filters off speckles (small regions of incorrectly computed disparity)
//! Connected components not larger than maxSpeckleSize whose internal variation stays
//! within maxDiff are overwritten in place with newVal; buf is optional scratch storage
//! that lets repeated calls avoid reallocation.
CV_EXPORTS_W void filterSpeckles( InputOutputArray img, double newVal, int maxSpeckleSize, double maxDiff,
                                  InputOutputArray buf=noArray() );

//! computes valid disparity ROI from the valid ROIs of the rectified images (that are returned by cv::stereoRectify())
CV_EXPORTS_W Rect getValidDisparityROI( Rect roi1, Rect roi2,
                                        int minDisparity, int numberOfDisparities,
                                        int SADWindowSize );

//! validates disparity using the left-right check. The matrix "cost" should be computed by the stereo correspondence algorithm
CV_EXPORTS_W void validateDisparity( InputOutputArray disparity, InputArray cost,
                                     int minDisparity, int numberOfDisparities,
                                     int disp12MaxDisp=1 );

//! reprojects disparity image to 3D: (x,y,d)->(X,Y,Z) using the matrix Q returned by cv::stereoRectify
//! When handleMissingValues is true, pixels with invalid (minimal) disparity get a large Z
//! so they can be filtered out; ddepth selects the output depth (-1 keeps the default).
CV_EXPORTS_W void reprojectImageTo3D( InputArray disparity,
                                      OutputArray _3dImage, InputArray Q,
                                      bool handleMissingValues=false,
                                      int ddepth=-1 );

//! computes an optimal 3x4 affine transformation between two 3D point sets robustly
//! (RANSAC with the given reprojection threshold and confidence); inliers receives the
//! per-point inlier mask and the return value reports success (non-zero) or failure.
CV_EXPORTS_W int estimateAffine3D(InputArray src, InputArray dst,
                                  OutputArray out, OutputArray inliers,
                                  double ransacThreshold=3, double confidence=0.99);
//! Fisheye camera model: K is the 3x3 camera matrix, D the 4x1 distortion vector (k1..k4).
namespace fisheye
{
    //! calibration flags (independent of the cv::CALIB_* pinhole flags)
    enum{
        CALIB_USE_INTRINSIC_GUESS = 1,  //!< start optimization from the provided K/D
        CALIB_RECOMPUTE_EXTRINSIC = 2,  //!< re-estimate extrinsics after each intrinsic iteration
        CALIB_CHECK_COND = 4,           //!< check the condition number of the design matrix
        CALIB_FIX_SKEW = 8,             //!< keep the skew coefficient (alpha) at zero
        CALIB_FIX_K1 = 16,              //!< keep distortion coefficient k1 fixed
        CALIB_FIX_K2 = 32,              //!< keep distortion coefficient k2 fixed
        CALIB_FIX_K3 = 64,              //!< keep distortion coefficient k3 fixed
        CALIB_FIX_K4 = 128,             //!< keep distortion coefficient k4 fixed
        CALIB_FIX_INTRINSIC = 256       //!< (stereo) keep both cameras' K and D fixed
    };

    //! projects 3D points using fisheye model
    //! affine encodes the rigid object-to-camera transform; alpha is the skew coefficient.
    CV_EXPORTS void projectPoints(InputArray objectPoints, OutputArray imagePoints, const Affine3d& affine,
        InputArray K, InputArray D, double alpha = 0, OutputArray jacobian = noArray());

    //! projects points using fisheye model
    //! Same as above with the transform given as a Rodrigues rotation vector rvec plus tvec.
    CV_EXPORTS void projectPoints(InputArray objectPoints, OutputArray imagePoints, InputArray rvec, InputArray tvec,
        InputArray K, InputArray D, double alpha = 0, OutputArray jacobian = noArray());

    //! distorts 2D points using fisheye model
    CV_EXPORTS void distortPoints(InputArray undistorted, OutputArray distorted, InputArray K, InputArray D, double alpha = 0);

    //! undistorts 2D points using fisheye model
    //! R is an optional rectification rotation and P an optional new camera/projection matrix.
    CV_EXPORTS void undistortPoints(InputArray distorted, OutputArray undistorted,
        InputArray K, InputArray D, InputArray R = noArray(), InputArray P = noArray());

    //! computing undistortion and rectification maps for image transform by cv::remap()
    //! If D is empty zero distortion is used, if R or P is empty identity matrixes are used
    CV_EXPORTS void initUndistortRectifyMap(InputArray K, InputArray D, InputArray R, InputArray P,
        const cv::Size& size, int m1type, OutputArray map1, OutputArray map2);

    //! undistorts image, optionally changes resolution and camera matrix. If Knew zero identity matrix is used
    CV_EXPORTS void undistortImage(InputArray distorted, OutputArray undistorted,
        InputArray K, InputArray D, InputArray Knew = cv::noArray(), const Size& new_size = Size());

    //! estimates new camera matrix for undistortion or rectification
    //! balance in [0,1] trades minimum-crop (0) against no-source-pixel-loss (1);
    //! fov_scale scales the output field of view.
    CV_EXPORTS void estimateNewCameraMatrixForUndistortRectify(InputArray K, InputArray D, const Size &image_size, InputArray R,
        OutputArray P, double balance = 0.0, const Size& new_size = Size(), double fov_scale = 1.0);

    //! performs camera calibaration
    //! Returns the final reprojection error; rvecs/tvecs receive the per-view extrinsics.
    CV_EXPORTS double calibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, const Size& image_size,
        InputOutputArray K, InputOutputArray D, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags = 0,
            TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON));

    //! stereo rectification estimation
    //! Fisheye analogue of cv::stereoRectify; (R, tvec) is the pose of the second camera
    //! relative to the first.
    CV_EXPORTS void stereoRectify(InputArray K1, InputArray D1, InputArray K2, InputArray D2, const Size &imageSize, InputArray R, InputArray tvec,
        OutputArray R1, OutputArray R2, OutputArray P1, OutputArray P2, OutputArray Q, int flags, const Size &newImageSize = Size(),
            double balance = 0.0, double fov_scale = 1.0);

    //! performs stereo calibaration
    //! Returns the final reprojection error; R/T receive the inter-camera transform.
    CV_EXPORTS double stereoCalibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
                                  InputOutputArray K1, InputOutputArray D1, InputOutputArray K2, InputOutputArray D2, Size imageSize,
                                  OutputArray R, OutputArray T, int flags = CALIB_FIX_INTRINSIC,
                                  TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON));
}
}
#endif
#endif

View File

@ -41,15 +41,87 @@
// //
//M*/ //M*/
#ifndef __OPENCV_CORE_AFFINE3_HPP__ #ifndef OPENCV_CORE_AFFINE3_HPP
#define __OPENCV_CORE_AFFINE3_HPP__ #define OPENCV_CORE_AFFINE3_HPP
#ifdef __cplusplus #ifdef __cplusplus
#include <opencv2/core/core.hpp> #include <opencv2/core.hpp>
namespace cv namespace cv
{ {
//! @addtogroup core
//! @{
/** @brief Affine transform
*
* It represents a 4x4 homogeneous transformation matrix \f$T\f$
*
* \f[T =
* \begin{bmatrix}
* R & t\\
* 0 & 1\\
* \end{bmatrix}
* \f]
*
* where \f$R\f$ is a 3x3 rotation matrix and \f$t\f$ is a 3x1 translation vector.
*
* You can specify \f$R\f$ either by a 3x3 rotation matrix or by a 3x1 rotation vector,
* which is converted to a 3x3 rotation matrix by the Rodrigues formula.
*
* To construct a matrix \f$T\f$ representing first rotation around the axis \f$r\f$ with rotation
* angle \f$|r|\f$ in radian (right hand rule) and then translation by the vector \f$t\f$, you can use
*
* @code
* cv::Vec3f r, t;
* cv::Affine3f T(r, t);
* @endcode
*
* If you already have the rotation matrix \f$R\f$, then you can use
*
* @code
* cv::Matx33f R;
* cv::Affine3f T(R, t);
* @endcode
*
* To extract the rotation matrix \f$R\f$ from \f$T\f$, use
*
* @code
* cv::Matx33f R = T.rotation();
* @endcode
*
* To extract the translation vector \f$t\f$ from \f$T\f$, use
*
* @code
* cv::Vec3f t = T.translation();
* @endcode
*
* To extract the rotation vector \f$r\f$ from \f$T\f$, use
*
* @code
* cv::Vec3f r = T.rvec();
* @endcode
*
* Note that since the mapping from rotation vectors to rotation matrices
* is many to one. The returned rotation vector is not necessarily the one
* you used before to set the matrix.
*
* If you have two transformations \f$T = T_1 * T_2\f$, use
*
* @code
* cv::Affine3f T, T1, T2;
* T = T2.concatenate(T1);
* @endcode
*
* To get the inverse transform of \f$T\f$, use
*
* @code
* cv::Affine3f T, T_inv;
* T_inv = T.inv();
* @endcode
*
*/
template<typename T> template<typename T>
class Affine3 class Affine3
{ {
@ -59,56 +131,139 @@ namespace cv
typedef Matx<float_type, 4, 4> Mat4; typedef Matx<float_type, 4, 4> Mat4;
typedef Vec<float_type, 3> Vec3; typedef Vec<float_type, 3> Vec3;
//! Default constructor. It represents a 4x4 identity matrix.
Affine3(); Affine3();
//Augmented affine matrix //! Augmented affine matrix
Affine3(const Mat4& affine); Affine3(const Mat4& affine);
//Rotation matrix /**
* The resulting 4x4 matrix is
*
* \f[
* \begin{bmatrix}
* R & t\\
* 0 & 1\\
* \end{bmatrix}
* \f]
*
* @param R 3x3 rotation matrix.
* @param t 3x1 translation vector.
*/
Affine3(const Mat3& R, const Vec3& t = Vec3::all(0)); Affine3(const Mat3& R, const Vec3& t = Vec3::all(0));
//Rodrigues vector /**
* Rodrigues vector.
*
* The last row of the current matrix is set to [0,0,0,1].
*
* @param rvec 3x1 rotation vector. Its direction indicates the rotation axis and its length
* indicates the rotation angle in radian (using right hand rule).
* @param t 3x1 translation vector.
*/
Affine3(const Vec3& rvec, const Vec3& t = Vec3::all(0)); Affine3(const Vec3& rvec, const Vec3& t = Vec3::all(0));
//Combines all contructors above. Supports 4x4, 4x3, 3x3, 1x3, 3x1 sizes of data matrix /**
* Combines all constructors above. Supports 4x4, 3x4, 3x3, 1x3, 3x1 sizes of data matrix.
*
* The last row of the current matrix is set to [0,0,0,1] when data is not 4x4.
*
* @param data 1-channel matrix.
* when it is 4x4, it is copied to the current matrix and t is not used.
* When it is 3x4, it is copied to the upper part 3x4 of the current matrix and t is not used.
* When it is 3x3, it is copied to the upper left 3x3 part of the current matrix.
* When it is 3x1 or 1x3, it is treated as a rotation vector and the Rodrigues formula is used
* to compute a 3x3 rotation matrix.
* @param t 3x1 translation vector. It is used only when data is neither 4x4 nor 3x4.
*/
explicit Affine3(const Mat& data, const Vec3& t = Vec3::all(0)); explicit Affine3(const Mat& data, const Vec3& t = Vec3::all(0));
//From 16th element array //! From 16-element array
explicit Affine3(const float_type* vals); explicit Affine3(const float_type* vals);
//! Create an 4x4 identity transform
static Affine3 Identity(); static Affine3 Identity();
//Rotation matrix /**
* Rotation matrix.
*
* Copy the rotation matrix to the upper left 3x3 part of the current matrix.
* The remaining elements of the current matrix are not changed.
*
* @param R 3x3 rotation matrix.
*
*/
void rotation(const Mat3& R); void rotation(const Mat3& R);
//Rodrigues vector /**
* Rodrigues vector.
*
* It sets the upper left 3x3 part of the matrix. The remaining part is unaffected.
*
* @param rvec 3x1 rotation vector. The direction indicates the rotation axis and
* its length indicates the rotation angle in radian (using the right thumb convention).
*/
void rotation(const Vec3& rvec); void rotation(const Vec3& rvec);
//Combines rotation methods above. Suports 3x3, 1x3, 3x1 sizes of data matrix; /**
* Combines rotation methods above. Supports 3x3, 1x3, 3x1 sizes of data matrix.
*
* It sets the upper left 3x3 part of the matrix. The remaining part is unaffected.
*
* @param data 1-channel matrix.
* When it is a 3x3 matrix, it sets the upper left 3x3 part of the current matrix.
* When it is a 1x3 or 3x1 matrix, it is used as a rotation vector. The Rodrigues formula
* is used to compute the rotation matrix and sets the upper left 3x3 part of the current matrix.
*/
void rotation(const Mat& data); void rotation(const Mat& data);
/**
* Copy the 3x3 matrix L to the upper left part of the current matrix
*
* It sets the upper left 3x3 part of the matrix. The remaining part is unaffected.
*
* @param L 3x3 matrix.
*/
void linear(const Mat3& L); void linear(const Mat3& L);
/**
* Copy t to the first three elements of the last column of the current matrix
*
* It sets the upper right 3x1 part of the matrix. The remaining part is unaffected.
*
* @param t 3x1 translation vector.
*/
void translation(const Vec3& t); void translation(const Vec3& t);
//! @return the upper left 3x3 part
Mat3 rotation() const; Mat3 rotation() const;
//! @return the upper left 3x3 part
Mat3 linear() const; Mat3 linear() const;
//! @return the upper right 3x1 part
Vec3 translation() const; Vec3 translation() const;
//Rodrigues vector //! Rodrigues vector.
//! @return a vector representing the upper left 3x3 rotation matrix of the current matrix.
//! @warning Since the mapping between rotation vectors and rotation matrices is many to one,
//! this function returns only one rotation vector that represents the current rotation matrix,
//! which is not necessarily the same one set by `rotation(const Vec3& rvec)`.
Vec3 rvec() const; Vec3 rvec() const;
//! @return the inverse of the current matrix.
Affine3 inv(int method = cv::DECOMP_SVD) const; Affine3 inv(int method = cv::DECOMP_SVD) const;
// a.rotate(R) is equivalent to Affine(R, 0) * a; //! a.rotate(R) is equivalent to Affine(R, 0) * a;
Affine3 rotate(const Mat3& R) const; Affine3 rotate(const Mat3& R) const;
// a.rotate(R) is equivalent to Affine(rvec, 0) * a; //! a.rotate(rvec) is equivalent to Affine(rvec, 0) * a;
Affine3 rotate(const Vec3& rvec) const; Affine3 rotate(const Vec3& rvec) const;
// a.translate(t) is equivalent to Affine(E, t) * a; //! a.translate(t) is equivalent to Affine(E, t) * a, where E is an identity matrix
Affine3 translate(const Vec3& t) const; Affine3 translate(const Vec3& t) const;
// a.concatenate(affine) is equivalent to affine * a; //! a.concatenate(affine) is equivalent to affine * a;
Affine3 concatenate(const Affine3& affine) const; Affine3 concatenate(const Affine3& affine) const;
template <typename Y> operator Affine3<Y>() const; template <typename Y> operator Affine3<Y>() const;
@ -128,6 +283,7 @@ namespace cv
template<typename T> static template<typename T> static
Affine3<T> operator*(const Affine3<T>& affine1, const Affine3<T>& affine2); Affine3<T> operator*(const Affine3<T>& affine1, const Affine3<T>& affine2);
//! V is a 3-element vector with member fields x, y and z
template<typename T, typename V> static template<typename T, typename V> static
V operator*(const Affine3<T>& affine, const V& vector); V operator*(const Affine3<T>& affine, const V& vector);
@ -145,19 +301,32 @@ namespace cv
typedef _Tp channel_type; typedef _Tp channel_type;
enum { generic_type = 0, enum { generic_type = 0,
depth = DataType<channel_type>::depth,
channels = 16, channels = 16,
fmt = DataType<channel_type>::fmt + ((channels - 1) << 8), fmt = traits::SafeFmt<channel_type>::fmt + ((channels - 1) << 8)
type = CV_MAKETYPE(depth, channels) #ifdef OPENCV_TRAITS_ENABLE_DEPRECATED
,depth = DataType<channel_type>::depth
,type = CV_MAKETYPE(depth, channels)
#endif
}; };
typedef Vec<channel_type, channels> vec_type; typedef Vec<channel_type, channels> vec_type;
}; };
namespace traits {
template<typename _Tp>
struct Depth< Affine3<_Tp> > { enum { value = Depth<_Tp>::value }; };
template<typename _Tp>
struct Type< Affine3<_Tp> > { enum { value = CV_MAKETYPE(Depth<_Tp>::value, 16) }; };
} // namespace
//! @} core
} }
//! @cond IGNORED
/////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////
/// Implementaiton // Implementation
template<typename T> inline template<typename T> inline
cv::Affine3<T>::Affine3() cv::Affine3<T>::Affine3()
@ -190,7 +359,8 @@ cv::Affine3<T>::Affine3(const Vec3& _rvec, const Vec3& t)
template<typename T> inline template<typename T> inline
cv::Affine3<T>::Affine3(const cv::Mat& data, const Vec3& t) cv::Affine3<T>::Affine3(const cv::Mat& data, const Vec3& t)
{ {
CV_Assert(data.type() == cv::DataType<T>::type); CV_Assert(data.type() == cv::traits::Type<T>::value);
CV_Assert(data.channels() == 1);
if (data.cols == 4 && data.rows == 4) if (data.cols == 4 && data.rows == 4)
{ {
@ -201,11 +371,13 @@ cv::Affine3<T>::Affine3(const cv::Mat& data, const Vec3& t)
{ {
rotation(data(Rect(0, 0, 3, 3))); rotation(data(Rect(0, 0, 3, 3)));
translation(data(Rect(3, 0, 1, 3))); translation(data(Rect(3, 0, 1, 3)));
return; }
else
{
rotation(data);
translation(t);
} }
rotation(data);
translation(t);
matrix.val[12] = matrix.val[13] = matrix.val[14] = 0; matrix.val[12] = matrix.val[13] = matrix.val[14] = 0;
matrix.val[15] = 1; matrix.val[15] = 1;
} }
@ -229,40 +401,36 @@ void cv::Affine3<T>::rotation(const Mat3& R)
template<typename T> inline template<typename T> inline
void cv::Affine3<T>::rotation(const Vec3& _rvec) void cv::Affine3<T>::rotation(const Vec3& _rvec)
{ {
double rx = _rvec[0], ry = _rvec[1], rz = _rvec[2]; double theta = norm(_rvec);
double theta = std::sqrt(rx*rx + ry*ry + rz*rz);
if (theta < DBL_EPSILON) if (theta < DBL_EPSILON)
rotation(Mat3::eye()); rotation(Mat3::eye());
else else
{ {
const double I[] = { 1, 0, 0, 0, 1, 0, 0, 0, 1 };
double c = std::cos(theta); double c = std::cos(theta);
double s = std::sin(theta); double s = std::sin(theta);
double c1 = 1. - c; double c1 = 1. - c;
double itheta = theta ? 1./theta : 0.; double itheta = (theta != 0) ? 1./theta : 0.;
rx *= itheta; ry *= itheta; rz *= itheta; Point3_<T> r = _rvec*itheta;
double rrt[] = { rx*rx, rx*ry, rx*rz, rx*ry, ry*ry, ry*rz, rx*rz, ry*rz, rz*rz }; Mat3 rrt( r.x*r.x, r.x*r.y, r.x*r.z, r.x*r.y, r.y*r.y, r.y*r.z, r.x*r.z, r.y*r.z, r.z*r.z );
double _r_x_[] = { 0, -rz, ry, rz, 0, -rx, -ry, rx, 0 }; Mat3 r_x( 0, -r.z, r.y, r.z, 0, -r.x, -r.y, r.x, 0 );
Mat3 R;
// R = cos(theta)*I + (1 - cos(theta))*r*rT + sin(theta)*[r_x] // R = cos(theta)*I + (1 - cos(theta))*r*rT + sin(theta)*[r_x]
// where [r_x] is [0 -rz ry; rz 0 -rx; -ry rx 0] // where [r_x] is [0 -rz ry; rz 0 -rx; -ry rx 0]
for(int k = 0; k < 9; ++k) Mat3 R = c*Mat3::eye() + c1*rrt + s*r_x;
R.val[k] = static_cast<float_type>(c*I[k] + c1*rrt[k] + s*_r_x_[k]);
rotation(R); rotation(R);
} }
} }
//Combines rotation methods above. Suports 3x3, 1x3, 3x1 sizes of data matrix; //Combines rotation methods above. Supports 3x3, 1x3, 3x1 sizes of data matrix;
template<typename T> inline template<typename T> inline
void cv::Affine3<T>::rotation(const cv::Mat& data) void cv::Affine3<T>::rotation(const cv::Mat& data)
{ {
CV_Assert(data.type() == cv::DataType<T>::type); CV_Assert(data.type() == cv::traits::Type<T>::value);
CV_Assert(data.channels() == 1);
if (data.cols == 3 && data.rows == 3) if (data.cols == 3 && data.rows == 3)
{ {
@ -277,7 +445,7 @@ void cv::Affine3<T>::rotation(const cv::Mat& data)
rotation(_rvec); rotation(_rvec);
} }
else else
CV_Assert(!"Input marix can be 3x3, 1x3 or 3x1"); CV_Error(Error::StsError, "Input matrix can only be 3x3, 1x3 or 3x1");
} }
template<typename T> inline template<typename T> inline
@ -476,21 +644,21 @@ cv::Vec3d cv::operator*(const cv::Affine3d& affine, const cv::Vec3d& v)
template<typename T> inline template<typename T> inline
cv::Affine3<T>::Affine3(const Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>& affine) cv::Affine3<T>::Affine3(const Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>& affine)
{ {
cv::Mat(4, 4, cv::DataType<T>::type, affine.matrix().data()).copyTo(matrix); cv::Mat(4, 4, cv::traits::Type<T>::value, affine.matrix().data()).copyTo(matrix);
} }
template<typename T> inline template<typename T> inline
cv::Affine3<T>::Affine3(const Eigen::Transform<T, 3, Eigen::Affine>& affine) cv::Affine3<T>::Affine3(const Eigen::Transform<T, 3, Eigen::Affine>& affine)
{ {
Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)> a = affine; Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)> a = affine;
cv::Mat(4, 4, cv::DataType<T>::type, a.matrix().data()).copyTo(matrix); cv::Mat(4, 4, cv::traits::Type<T>::value, a.matrix().data()).copyTo(matrix);
} }
template<typename T> inline template<typename T> inline
cv::Affine3<T>::operator Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>() const cv::Affine3<T>::operator Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>() const
{ {
Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)> r; Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)> r;
cv::Mat hdr(4, 4, cv::DataType<T>::type, r.matrix().data()); cv::Mat hdr(4, 4, cv::traits::Type<T>::value, r.matrix().data());
cv::Mat(matrix, false).copyTo(hdr); cv::Mat(matrix, false).copyTo(hdr);
return r; return r;
} }
@ -503,7 +671,8 @@ cv::Affine3<T>::operator Eigen::Transform<T, 3, Eigen::Affine>() const
#endif /* defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H */ #endif /* defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H */
//! @endcond
#endif /* __cplusplus */ #endif /* __cplusplus */
#endif /* __OPENCV_CORE_AFFINE3_HPP__ */ #endif /* OPENCV_CORE_AFFINE3_HPP */

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -12,6 +12,7 @@
// //
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// //
// Redistribution and use in source and binary forms, with or without modification, // Redistribution and use in source and binary forms, with or without modification,
@ -40,13 +41,11 @@
// //
//M*/ //M*/
#ifndef __OPENCV_CORE_EIGEN_HPP__
#define __OPENCV_CORE_EIGEN_HPP__
#ifdef __cplusplus #ifndef OPENCV_CORE_EIGEN_HPP
#define OPENCV_CORE_EIGEN_HPP
#include "opencv2/core/core_c.h" #include "opencv2/core.hpp"
#include "opencv2/core/core.hpp"
#if defined _MSC_VER && _MSC_VER >= 1200 #if defined _MSC_VER && _MSC_VER >= 1200
#pragma warning( disable: 4714 ) //__forceinline is not inlined #pragma warning( disable: 4714 ) //__forceinline is not inlined
@ -57,32 +56,50 @@
namespace cv namespace cv
{ {
template<typename _Tp, int _rows, int _cols, int _options, int _maxRows, int _maxCols> //! @addtogroup core_eigen
void eigen2cv( const Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& src, Mat& dst ) //! @{
template<typename _Tp, int _rows, int _cols, int _options, int _maxRows, int _maxCols> static inline
void eigen2cv( const Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& src, OutputArray dst )
{ {
if( !(src.Flags & Eigen::RowMajorBit) ) if( !(src.Flags & Eigen::RowMajorBit) )
{ {
Mat _src(src.cols(), src.rows(), DataType<_Tp>::type, Mat _src(src.cols(), src.rows(), traits::Type<_Tp>::value,
(void*)src.data(), src.stride()*sizeof(_Tp)); (void*)src.data(), src.outerStride()*sizeof(_Tp));
transpose(_src, dst); transpose(_src, dst);
} }
else else
{ {
Mat _src(src.rows(), src.cols(), DataType<_Tp>::type, Mat _src(src.rows(), src.cols(), traits::Type<_Tp>::value,
(void*)src.data(), src.stride()*sizeof(_Tp)); (void*)src.data(), src.outerStride()*sizeof(_Tp));
_src.copyTo(dst); _src.copyTo(dst);
} }
} }
template<typename _Tp, int _rows, int _cols, int _options, int _maxRows, int _maxCols> // Matx case
template<typename _Tp, int _rows, int _cols, int _options, int _maxRows, int _maxCols> static inline
void eigen2cv( const Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& src,
Matx<_Tp, _rows, _cols>& dst )
{
if( !(src.Flags & Eigen::RowMajorBit) )
{
dst = Matx<_Tp, _cols, _rows>(static_cast<const _Tp*>(src.data())).t();
}
else
{
dst = Matx<_Tp, _rows, _cols>(static_cast<const _Tp*>(src.data()));
}
}
template<typename _Tp, int _rows, int _cols, int _options, int _maxRows, int _maxCols> static inline
void cv2eigen( const Mat& src, void cv2eigen( const Mat& src,
Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& dst ) Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& dst )
{ {
CV_DbgAssert(src.rows == _rows && src.cols == _cols); CV_DbgAssert(src.rows == _rows && src.cols == _cols);
if( !(dst.Flags & Eigen::RowMajorBit) ) if( !(dst.Flags & Eigen::RowMajorBit) )
{ {
Mat _dst(src.cols, src.rows, DataType<_Tp>::type, const Mat _dst(src.cols, src.rows, traits::Type<_Tp>::value,
dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
if( src.type() == _dst.type() ) if( src.type() == _dst.type() )
transpose(src, _dst); transpose(src, _dst);
else if( src.cols == src.rows ) else if( src.cols == src.rows )
@ -92,47 +109,43 @@ void cv2eigen( const Mat& src,
} }
else else
Mat(src.t()).convertTo(_dst, _dst.type()); Mat(src.t()).convertTo(_dst, _dst.type());
CV_DbgAssert(_dst.data == (uchar*)dst.data());
} }
else else
{ {
Mat _dst(src.rows, src.cols, DataType<_Tp>::type, const Mat _dst(src.rows, src.cols, traits::Type<_Tp>::value,
dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
src.convertTo(_dst, _dst.type()); src.convertTo(_dst, _dst.type());
CV_DbgAssert(_dst.data == (uchar*)dst.data());
} }
} }
// Matx case // Matx case
template<typename _Tp, int _rows, int _cols, int _options, int _maxRows, int _maxCols> template<typename _Tp, int _rows, int _cols, int _options, int _maxRows, int _maxCols> static inline
void cv2eigen( const Matx<_Tp, _rows, _cols>& src, void cv2eigen( const Matx<_Tp, _rows, _cols>& src,
Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& dst ) Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& dst )
{ {
if( !(dst.Flags & Eigen::RowMajorBit) ) if( !(dst.Flags & Eigen::RowMajorBit) )
{ {
Mat _dst(_cols, _rows, DataType<_Tp>::type, const Mat _dst(_cols, _rows, traits::Type<_Tp>::value,
dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
transpose(src, _dst); transpose(src, _dst);
CV_DbgAssert(_dst.data == (uchar*)dst.data());
} }
else else
{ {
Mat _dst(_rows, _cols, DataType<_Tp>::type, const Mat _dst(_rows, _cols, traits::Type<_Tp>::value,
dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
Mat(src).copyTo(_dst); Mat(src).copyTo(_dst);
CV_DbgAssert(_dst.data == (uchar*)dst.data());
} }
} }
template<typename _Tp> template<typename _Tp> static inline
void cv2eigen( const Mat& src, void cv2eigen( const Mat& src,
Eigen::Matrix<_Tp, Eigen::Dynamic, Eigen::Dynamic>& dst ) Eigen::Matrix<_Tp, Eigen::Dynamic, Eigen::Dynamic>& dst )
{ {
dst.resize(src.rows, src.cols); dst.resize(src.rows, src.cols);
if( !(dst.Flags & Eigen::RowMajorBit) ) if( !(dst.Flags & Eigen::RowMajorBit) )
{ {
Mat _dst(src.cols, src.rows, DataType<_Tp>::type, const Mat _dst(src.cols, src.rows, traits::Type<_Tp>::value,
dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
if( src.type() == _dst.type() ) if( src.type() == _dst.type() )
transpose(src, _dst); transpose(src, _dst);
else if( src.cols == src.rows ) else if( src.cols == src.rows )
@ -142,40 +155,36 @@ void cv2eigen( const Mat& src,
} }
else else
Mat(src.t()).convertTo(_dst, _dst.type()); Mat(src.t()).convertTo(_dst, _dst.type());
CV_DbgAssert(_dst.data == (uchar*)dst.data());
} }
else else
{ {
Mat _dst(src.rows, src.cols, DataType<_Tp>::type, const Mat _dst(src.rows, src.cols, traits::Type<_Tp>::value,
dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
src.convertTo(_dst, _dst.type()); src.convertTo(_dst, _dst.type());
CV_DbgAssert(_dst.data == (uchar*)dst.data());
} }
} }
// Matx case // Matx case
template<typename _Tp, int _rows, int _cols> template<typename _Tp, int _rows, int _cols> static inline
void cv2eigen( const Matx<_Tp, _rows, _cols>& src, void cv2eigen( const Matx<_Tp, _rows, _cols>& src,
Eigen::Matrix<_Tp, Eigen::Dynamic, Eigen::Dynamic>& dst ) Eigen::Matrix<_Tp, Eigen::Dynamic, Eigen::Dynamic>& dst )
{ {
dst.resize(_rows, _cols); dst.resize(_rows, _cols);
if( !(dst.Flags & Eigen::RowMajorBit) ) if( !(dst.Flags & Eigen::RowMajorBit) )
{ {
Mat _dst(_cols, _rows, DataType<_Tp>::type, const Mat _dst(_cols, _rows, traits::Type<_Tp>::value,
dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
transpose(src, _dst); transpose(src, _dst);
CV_DbgAssert(_dst.data == (uchar*)dst.data());
} }
else else
{ {
Mat _dst(_rows, _cols, DataType<_Tp>::type, const Mat _dst(_rows, _cols, traits::Type<_Tp>::value,
dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
Mat(src).copyTo(_dst); Mat(src).copyTo(_dst);
CV_DbgAssert(_dst.data == (uchar*)dst.data());
} }
} }
template<typename _Tp> template<typename _Tp> static inline
void cv2eigen( const Mat& src, void cv2eigen( const Mat& src,
Eigen::Matrix<_Tp, Eigen::Dynamic, 1>& dst ) Eigen::Matrix<_Tp, Eigen::Dynamic, 1>& dst )
{ {
@ -184,25 +193,23 @@ void cv2eigen( const Mat& src,
if( !(dst.Flags & Eigen::RowMajorBit) ) if( !(dst.Flags & Eigen::RowMajorBit) )
{ {
Mat _dst(src.cols, src.rows, DataType<_Tp>::type, const Mat _dst(src.cols, src.rows, traits::Type<_Tp>::value,
dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
if( src.type() == _dst.type() ) if( src.type() == _dst.type() )
transpose(src, _dst); transpose(src, _dst);
else else
Mat(src.t()).convertTo(_dst, _dst.type()); Mat(src.t()).convertTo(_dst, _dst.type());
CV_DbgAssert(_dst.data == (uchar*)dst.data());
} }
else else
{ {
Mat _dst(src.rows, src.cols, DataType<_Tp>::type, const Mat _dst(src.rows, src.cols, traits::Type<_Tp>::value,
dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
src.convertTo(_dst, _dst.type()); src.convertTo(_dst, _dst.type());
CV_DbgAssert(_dst.data == (uchar*)dst.data());
} }
} }
// Matx case // Matx case
template<typename _Tp, int _rows> template<typename _Tp, int _rows> static inline
void cv2eigen( const Matx<_Tp, _rows, 1>& src, void cv2eigen( const Matx<_Tp, _rows, 1>& src,
Eigen::Matrix<_Tp, Eigen::Dynamic, 1>& dst ) Eigen::Matrix<_Tp, Eigen::Dynamic, 1>& dst )
{ {
@ -210,22 +217,20 @@ void cv2eigen( const Matx<_Tp, _rows, 1>& src,
if( !(dst.Flags & Eigen::RowMajorBit) ) if( !(dst.Flags & Eigen::RowMajorBit) )
{ {
Mat _dst(1, _rows, DataType<_Tp>::type, const Mat _dst(1, _rows, traits::Type<_Tp>::value,
dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
transpose(src, _dst); transpose(src, _dst);
CV_DbgAssert(_dst.data == (uchar*)dst.data());
} }
else else
{ {
Mat _dst(_rows, 1, DataType<_Tp>::type, const Mat _dst(_rows, 1, traits::Type<_Tp>::value,
dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
src.copyTo(_dst); src.copyTo(_dst);
CV_DbgAssert(_dst.data == (uchar*)dst.data());
} }
} }
template<typename _Tp> template<typename _Tp> static inline
void cv2eigen( const Mat& src, void cv2eigen( const Mat& src,
Eigen::Matrix<_Tp, 1, Eigen::Dynamic>& dst ) Eigen::Matrix<_Tp, 1, Eigen::Dynamic>& dst )
{ {
@ -233,48 +238,43 @@ void cv2eigen( const Mat& src,
dst.resize(src.cols); dst.resize(src.cols);
if( !(dst.Flags & Eigen::RowMajorBit) ) if( !(dst.Flags & Eigen::RowMajorBit) )
{ {
Mat _dst(src.cols, src.rows, DataType<_Tp>::type, const Mat _dst(src.cols, src.rows, traits::Type<_Tp>::value,
dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
if( src.type() == _dst.type() ) if( src.type() == _dst.type() )
transpose(src, _dst); transpose(src, _dst);
else else
Mat(src.t()).convertTo(_dst, _dst.type()); Mat(src.t()).convertTo(_dst, _dst.type());
CV_DbgAssert(_dst.data == (uchar*)dst.data());
} }
else else
{ {
Mat _dst(src.rows, src.cols, DataType<_Tp>::type, const Mat _dst(src.rows, src.cols, traits::Type<_Tp>::value,
dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
src.convertTo(_dst, _dst.type()); src.convertTo(_dst, _dst.type());
CV_DbgAssert(_dst.data == (uchar*)dst.data());
} }
} }
//Matx //Matx
template<typename _Tp, int _cols> template<typename _Tp, int _cols> static inline
void cv2eigen( const Matx<_Tp, 1, _cols>& src, void cv2eigen( const Matx<_Tp, 1, _cols>& src,
Eigen::Matrix<_Tp, 1, Eigen::Dynamic>& dst ) Eigen::Matrix<_Tp, 1, Eigen::Dynamic>& dst )
{ {
dst.resize(_cols); dst.resize(_cols);
if( !(dst.Flags & Eigen::RowMajorBit) ) if( !(dst.Flags & Eigen::RowMajorBit) )
{ {
Mat _dst(_cols, 1, DataType<_Tp>::type, const Mat _dst(_cols, 1, traits::Type<_Tp>::value,
dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
transpose(src, _dst); transpose(src, _dst);
CV_DbgAssert(_dst.data == (uchar*)dst.data());
} }
else else
{ {
Mat _dst(1, _cols, DataType<_Tp>::type, const Mat _dst(1, _cols, traits::Type<_Tp>::value,
dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
Mat(src).copyTo(_dst); Mat(src).copyTo(_dst);
CV_DbgAssert(_dst.data == (uchar*)dst.data());
} }
} }
//! @}
} } // cv
#endif
#endif #endif

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,72 +1,26 @@
/*M/////////////////////////////////////////////////////////////////////////////////////// // This file is part of OpenCV project.
// // It is subject to the license terms in the LICENSE file found in the top-level directory
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // of this distribution and at http://opencv.org/license.html.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright( C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
//(including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort(including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
/* #ifndef OPENCV_VERSION_HPP
definition of the current version of OpenCV #define OPENCV_VERSION_HPP
Usefull to test in user programs
*/
#ifndef __OPENCV_VERSION_HPP__ #define CV_VERSION_MAJOR 3
#define __OPENCV_VERSION_HPP__ #define CV_VERSION_MINOR 4
#define CV_VERSION_REVISION 6
#define CV_VERSION_EPOCH 2 #define CV_VERSION_STATUS ""
#define CV_VERSION_MAJOR 4
#define CV_VERSION_MINOR 10
#define CV_VERSION_REVISION 0
#define CVAUX_STR_EXP(__A) #__A #define CVAUX_STR_EXP(__A) #__A
#define CVAUX_STR(__A) CVAUX_STR_EXP(__A) #define CVAUX_STR(__A) CVAUX_STR_EXP(__A)
#define CVAUX_STRW_EXP(__A) L#__A #define CVAUX_STRW_EXP(__A) L ## #__A
#define CVAUX_STRW(__A) CVAUX_STRW_EXP(__A) #define CVAUX_STRW(__A) CVAUX_STRW_EXP(__A)
#if CV_VERSION_REVISION #define CV_VERSION CVAUX_STR(CV_VERSION_MAJOR) "." CVAUX_STR(CV_VERSION_MINOR) "." CVAUX_STR(CV_VERSION_REVISION) CV_VERSION_STATUS
# define CV_VERSION CVAUX_STR(CV_VERSION_EPOCH) "." CVAUX_STR(CV_VERSION_MAJOR) "." CVAUX_STR(CV_VERSION_MINOR) "." CVAUX_STR(CV_VERSION_REVISION)
#else
# define CV_VERSION CVAUX_STR(CV_VERSION_EPOCH) "." CVAUX_STR(CV_VERSION_MAJOR) "." CVAUX_STR(CV_VERSION_MINOR)
#endif
/* old style version constants*/ /* old style version constants*/
#define CV_MAJOR_VERSION CV_VERSION_EPOCH #define CV_MAJOR_VERSION CV_VERSION_MAJOR
#define CV_MINOR_VERSION CV_VERSION_MAJOR #define CV_MINOR_VERSION CV_VERSION_MINOR
#define CV_SUBMINOR_VERSION CV_VERSION_MINOR #define CV_SUBMINOR_VERSION CV_VERSION_REVISION
#endif #endif // OPENCV_VERSION_HPP

View File

@ -1,4 +1,4 @@
/////////////////////////////////////////////////////////////////////////////// /*M//////////////////////////////////////////////////////////////////////////////
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
// //
// By downloading, copying, installing or using the software you agree to // By downloading, copying, installing or using the software you agree to
@ -36,69 +36,11 @@
// and on any theory of liability, whether in contract, strict liability, // and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of // or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage. // the use of this software, even if advised of the possibility of such damage.
///////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////
// //M*/
// Image class which provides a thin layer around an IplImage. The goals
// of the class design are: #ifndef OPENCV_CORE_WIMAGE_HPP
// 1. All the data has explicit ownership to avoid memory leaks #define OPENCV_CORE_WIMAGE_HPP
// 2. No hidden allocations or copies for performance.
// 3. Easy access to OpenCV methods (which will access IPP if available)
// 4. Can easily treat external data as an image
// 5. Easy to create images which are subsets of other images
// 6. Fast pixel access which can take advantage of number of channels
// if known at compile time.
//
// The WImage class is the image class which provides the data accessors.
// The 'W' comes from the fact that it is also a wrapper around the popular
// but inconvenient IplImage class. A WImage can be constructed either using a
// WImageBuffer class which allocates and frees the data,
// or using a WImageView class which constructs a subimage or a view into
// external data. The view class does no memory management. Each class
// actually has two versions, one when the number of channels is known at
// compile time and one when it isn't. Using the one with the number of
// channels specified can provide some compile time optimizations by using the
// fact that the number of channels is a constant.
//
// We use the convention (c,r) to refer to column c and row r with (0,0) being
// the upper left corner. This is similar to standard Euclidean coordinates
// with the first coordinate varying in the horizontal direction and the second
// coordinate varying in the vertical direction.
// Thus (c,r) is usually in the domain [0, width) X [0, height)
//
// Example usage:
// WImageBuffer3_b im(5,7); // Make a 5X7 3 channel image of type uchar
// WImageView3_b sub_im(im, 2,2, 3,3); // 3X3 submatrix
// vector<float> vec(10, 3.0f);
// WImageView1_f user_im(&vec[0], 2, 5); // 2X5 image w/ supplied data
//
// im.SetZero(); // same as cvSetZero(im.Ipl())
// *im(2, 3) = 15; // Modify the element at column 2, row 3
// MySetRand(&sub_im);
//
// // Copy the second row into the first. This can be done with no memory
// // allocation and will use SSE if IPP is available.
// int w = im.Width();
// im.View(0,0, w,1).CopyFrom(im.View(0,1, w,1));
//
// // Doesn't care about source of data since using WImage
// void MySetRand(WImage_b* im) { // Works with any number of channels
// for (int r = 0; r < im->Height(); ++r) {
// float* row = im->Row(r);
// for (int c = 0; c < im->Width(); ++c) {
// for (int ch = 0; ch < im->Channels(); ++ch, ++row) {
// *row = uchar(rand() & 255);
// }
// }
// }
// }
//
// Functions that are not part of the basic image allocation, viewing, and
// access should come from OpenCV, except some useful functions that are not
// part of OpenCV can be found in wimage_util.h
#ifndef __OPENCV_CORE_WIMAGE_HPP__
#define __OPENCV_CORE_WIMAGE_HPP__
#include "opencv2/core/core_c.h" #include "opencv2/core/core_c.h"
@ -106,6 +48,9 @@
namespace cv { namespace cv {
//! @addtogroup core
//! @{
template <typename T> class WImage; template <typename T> class WImage;
template <typename T> class WImageBuffer; template <typename T> class WImageBuffer;
template <typename T> class WImageView; template <typename T> class WImageView;
@ -165,12 +110,63 @@ typedef WImageC<ushort, 3> WImage3_16u;
typedef WImageViewC<ushort, 3> WImageView3_16u; typedef WImageViewC<ushort, 3> WImageView3_16u;
typedef WImageBufferC<ushort, 3> WImageBuffer3_16u; typedef WImageBufferC<ushort, 3> WImageBuffer3_16u;
// /** @brief Image class which provides a thin layer around an IplImage.
// WImage definitions
// The goals of the class design are:
// This WImage class gives access to the data it refers to. It can be
// constructed either by allocating the data with a WImageBuffer class or -# All the data has explicit ownership to avoid memory leaks
// using the WImageView class to refer to a subimage or outside data. -# No hidden allocations or copies for performance.
-# Easy access to OpenCV methods (which will access IPP if available)
-# Can easily treat external data as an image
-# Easy to create images which are subsets of other images
-# Fast pixel access which can take advantage of number of channels if known at compile time.
The WImage class is the image class which provides the data accessors. The 'W' comes from the fact
that it is also a wrapper around the popular but inconvenient IplImage class. A WImage can be
constructed either using a WImageBuffer class which allocates and frees the data, or using a
WImageView class which constructs a subimage or a view into external data. The view class does no
memory management. Each class actually has two versions, one when the number of channels is known
at compile time and one when it isn't. Using the one with the number of channels specified can
provide some compile time optimizations by using the fact that the number of channels is a
constant.
We use the convention (c,r) to refer to column c and row r with (0,0) being the upper left corner.
This is similar to standard Euclidean coordinates with the first coordinate varying in the
horizontal direction and the second coordinate varying in the vertical direction. Thus (c,r) is
usually in the domain [0, width) X [0, height)
Example usage:
@code
WImageBuffer3_b im(5,7); // Make a 5X7 3 channel image of type uchar
WImageView3_b sub_im(im, 2,2, 3,3); // 3X3 submatrix
vector<float> vec(10, 3.0f);
WImageView1_f user_im(&vec[0], 2, 5); // 2X5 image w/ supplied data
im.SetZero(); // same as cvSetZero(im.Ipl())
*im(2, 3) = 15; // Modify the element at column 2, row 3
MySetRand(&sub_im);
// Copy the second row into the first. This can be done with no memory
// allocation and will use SSE if IPP is available.
int w = im.Width();
im.View(0,0, w,1).CopyFrom(im.View(0,1, w,1));
// Doesn't care about source of data since using WImage
void MySetRand(WImage_b* im) { // Works with any number of channels
for (int r = 0; r < im->Height(); ++r) {
float* row = im->Row(r);
for (int c = 0; c < im->Width(); ++c) {
for (int ch = 0; ch < im->Channels(); ++ch, ++row) {
*row = uchar(rand() & 255);
}
}
}
}
@endcode
Functions that are not part of the basic image allocation, viewing, and access should come from
OpenCV, except some useful functions that are not part of OpenCV can be found in wimage_util.h
*/
template<typename T> template<typename T>
class WImage class WImage
{ {
@ -252,10 +248,10 @@ protected:
}; };
/** Image class when both the pixel type and number of channels
// Image class when both the pixel type and number of channels are known at compile time. This wrapper will speed up some of the operations
// are known at compile time. This wrapper will speed up some of the operations like accessing individual pixels using the () operator.
// like accessing individual pixels using the () operator. */
template<typename T, int C> template<typename T, int C>
class WImageC : public WImage<T> class WImageC : public WImage<T>
{ {
@ -292,12 +288,9 @@ protected:
} }
}; };
// /** Image class which owns the data, so it can be allocated and is always
// WImageBuffer definitions freed. It cannot be copied but can be explicitly cloned.
// */
// Image class which owns the data, so it can be allocated and is always
// freed. It cannot be copied but can be explicity cloned.
//
template<typename T> template<typename T>
class WImageBuffer : public WImage<T> class WImageBuffer : public WImage<T>
{ {
@ -352,8 +345,8 @@ private:
void operator=(const WImageBuffer&); void operator=(const WImageBuffer&);
}; };
// Like a WImageBuffer class but when the number of channels is known /** Like a WImageBuffer class but when the number of channels is known at compile time.
// at compile time. */
template<typename T, int C> template<typename T, int C>
class WImageBufferC : public WImageC<T, C> class WImageBufferC : public WImageC<T, C>
{ {
@ -409,14 +402,10 @@ private:
void operator=(const WImageBufferC&); void operator=(const WImageBufferC&);
}; };
// /** View into an image class which allows treating a subimage as an image or treating external data
// WImageView definitions as an image
// */
// View into an image class which allows treating a subimage as an image template<typename T> class WImageView : public WImage<T>
// or treating external data as an image
//
template<typename T>
class WImageView : public WImage<T>
{ {
public: public:
typedef typename WImage<T>::BaseType BaseType; typedef typename WImage<T>::BaseType BaseType;
@ -518,15 +507,9 @@ inline int WImage<float>::Depth() const {return IPL_DEPTH_32F; }
template<> template<>
inline int WImage<double>::Depth() const {return IPL_DEPTH_64F; } inline int WImage<double>::Depth() const {return IPL_DEPTH_64F; }
//
// Pure virtual destructors still need to be defined.
//
template<typename T> inline WImage<T>::~WImage() {} template<typename T> inline WImage<T>::~WImage() {}
template<typename T, int C> inline WImageC<T, C>::~WImageC() {} template<typename T, int C> inline WImageC<T, C>::~WImageC() {}
//
// Allocate ImageData
//
template<typename T> template<typename T>
inline void WImageBuffer<T>::Allocate(int width, int height, int nchannels) inline void WImageBuffer<T>::Allocate(int width, int height, int nchannels)
{ {
@ -547,9 +530,6 @@ inline void WImageBufferC<T, C>::Allocate(int width, int height)
} }
} }
//
// ImageView methods
//
template<typename T> template<typename T>
WImageView<T>::WImageView(WImage<T>* img, int c, int r, int width, int height) WImageView<T>::WImageView(WImage<T>* img, int c, int r, int width, int height)
: WImage<T>(0) : WImage<T>(0)
@ -614,6 +594,8 @@ WImageViewC<T, C> WImageC<T, C>::View(int c, int r, int width, int height) {
return WImageViewC<T, C>(this, c, r, width, height); return WImageViewC<T, C>(this, c, r, width, height);
} }
//! @} core
} // end of namespace } // end of namespace
#endif // __cplusplus #endif // __cplusplus

View File

@ -97,6 +97,7 @@ public:
blocksize = blockSize; blocksize = blockSize;
remaining = 0; remaining = 0;
base = NULL; base = NULL;
loc = NULL;
usedMemory = 0; usedMemory = 0;
wastedMemory = 0; wastedMemory = 0;
@ -181,6 +182,9 @@ public:
return mem; return mem;
} }
private:
PooledAllocator(const PooledAllocator &); // copy disabled
PooledAllocator& operator=(const PooledAllocator &); // assign disabled
}; };
} }

View File

@ -44,59 +44,60 @@ struct base_any_policy
virtual void clone(void* const* src, void** dest) = 0; virtual void clone(void* const* src, void** dest) = 0;
virtual void move(void* const* src, void** dest) = 0; virtual void move(void* const* src, void** dest) = 0;
virtual void* get_value(void** src) = 0; virtual void* get_value(void** src) = 0;
virtual const void* get_value(void* const * src) = 0;
virtual ::size_t get_size() = 0; virtual ::size_t get_size() = 0;
virtual const std::type_info& type() = 0; virtual const std::type_info& type() = 0;
virtual void print(std::ostream& out, void* const* src) = 0; virtual void print(std::ostream& out, void* const* src) = 0;
#ifdef OPENCV_CAN_BREAK_BINARY_COMPATIBILITY
virtual ~base_any_policy() {} virtual ~base_any_policy() {}
#endif
}; };
template<typename T> template<typename T>
struct typed_base_any_policy : base_any_policy struct typed_base_any_policy : base_any_policy
{ {
virtual ::size_t get_size() { return sizeof(T); } virtual ::size_t get_size() CV_OVERRIDE { return sizeof(T); }
virtual const std::type_info& type() { return typeid(T); } virtual const std::type_info& type() CV_OVERRIDE { return typeid(T); }
}; };
template<typename T> template<typename T>
struct small_any_policy : typed_base_any_policy<T> struct small_any_policy CV_FINAL : typed_base_any_policy<T>
{ {
virtual void static_delete(void**) { } virtual void static_delete(void**) CV_OVERRIDE { }
virtual void copy_from_value(void const* src, void** dest) virtual void copy_from_value(void const* src, void** dest) CV_OVERRIDE
{ {
new (dest) T(* reinterpret_cast<T const*>(src)); new (dest) T(* reinterpret_cast<T const*>(src));
} }
virtual void clone(void* const* src, void** dest) { *dest = *src; } virtual void clone(void* const* src, void** dest) CV_OVERRIDE { *dest = *src; }
virtual void move(void* const* src, void** dest) { *dest = *src; } virtual void move(void* const* src, void** dest) CV_OVERRIDE { *dest = *src; }
virtual void* get_value(void** src) { return reinterpret_cast<void*>(src); } virtual void* get_value(void** src) CV_OVERRIDE { return reinterpret_cast<void*>(src); }
virtual void print(std::ostream& out, void* const* src) { out << *reinterpret_cast<T const*>(src); } virtual const void* get_value(void* const * src) CV_OVERRIDE { return reinterpret_cast<const void*>(src); }
virtual void print(std::ostream& out, void* const* src) CV_OVERRIDE { out << *reinterpret_cast<T const*>(src); }
}; };
template<typename T> template<typename T>
struct big_any_policy : typed_base_any_policy<T> struct big_any_policy CV_FINAL : typed_base_any_policy<T>
{ {
virtual void static_delete(void** x) virtual void static_delete(void** x) CV_OVERRIDE
{ {
if (* x) delete (* reinterpret_cast<T**>(x)); *x = NULL; if (* x) delete (* reinterpret_cast<T**>(x));
*x = NULL;
} }
virtual void copy_from_value(void const* src, void** dest) virtual void copy_from_value(void const* src, void** dest) CV_OVERRIDE
{ {
*dest = new T(*reinterpret_cast<T const*>(src)); *dest = new T(*reinterpret_cast<T const*>(src));
} }
virtual void clone(void* const* src, void** dest) virtual void clone(void* const* src, void** dest) CV_OVERRIDE
{ {
*dest = new T(**reinterpret_cast<T* const*>(src)); *dest = new T(**reinterpret_cast<T* const*>(src));
} }
virtual void move(void* const* src, void** dest) virtual void move(void* const* src, void** dest) CV_OVERRIDE
{ {
(*reinterpret_cast<T**>(dest))->~T(); (*reinterpret_cast<T**>(dest))->~T();
**reinterpret_cast<T**>(dest) = **reinterpret_cast<T* const*>(src); **reinterpret_cast<T**>(dest) = **reinterpret_cast<T* const*>(src);
} }
virtual void* get_value(void** src) { return *src; } virtual void* get_value(void** src) CV_OVERRIDE { return *src; }
virtual void print(std::ostream& out, void* const* src) { out << *reinterpret_cast<T const*>(*src); } virtual const void* get_value(void* const * src) CV_OVERRIDE { return *src; }
virtual void print(std::ostream& out, void* const* src) CV_OVERRIDE { out << *reinterpret_cast<T const*>(*src); }
}; };
template<> inline void big_any_policy<flann_centers_init_t>::print(std::ostream& out, void* const* src) template<> inline void big_any_policy<flann_centers_init_t>::print(std::ostream& out, void* const* src)
@ -109,6 +110,11 @@ template<> inline void big_any_policy<flann_algorithm_t>::print(std::ostream& ou
out << int(*reinterpret_cast<flann_algorithm_t const*>(*src)); out << int(*reinterpret_cast<flann_algorithm_t const*>(*src));
} }
template<> inline void big_any_policy<cv::String>::print(std::ostream& out, void* const* src)
{
out << (*reinterpret_cast<cv::String const*>(*src)).c_str();
}
template<typename T> template<typename T>
struct choose_policy struct choose_policy
{ {
@ -150,13 +156,27 @@ SMALL_POLICY(bool);
#undef SMALL_POLICY #undef SMALL_POLICY
/// This function will return a different policy for each type. template <typename T>
template<typename T> class SinglePolicy
base_any_policy* get_policy()
{ {
SinglePolicy();
SinglePolicy(const SinglePolicy& other);
SinglePolicy& operator=(const SinglePolicy& other);
public:
static base_any_policy* get_policy();
private:
static typename choose_policy<T>::type policy; static typename choose_policy<T>::type policy;
return &policy; };
}
template <typename T>
typename choose_policy<T>::type SinglePolicy<T>::policy;
/// This function will return a different policy for each type.
template <typename T>
inline base_any_policy* SinglePolicy<T>::get_policy() { return &policy; }
} // namespace anyimpl } // namespace anyimpl
struct any struct any
@ -170,26 +190,26 @@ public:
/// Initializing constructor. /// Initializing constructor.
template <typename T> template <typename T>
any(const T& x) any(const T& x)
: policy(anyimpl::get_policy<anyimpl::empty_any>()), object(NULL) : policy(anyimpl::SinglePolicy<anyimpl::empty_any>::get_policy()), object(NULL)
{ {
assign(x); assign(x);
} }
/// Empty constructor. /// Empty constructor.
any() any()
: policy(anyimpl::get_policy<anyimpl::empty_any>()), object(NULL) : policy(anyimpl::SinglePolicy<anyimpl::empty_any>::get_policy()), object(NULL)
{ } { }
/// Special initializing constructor for string literals. /// Special initializing constructor for string literals.
any(const char* x) any(const char* x)
: policy(anyimpl::get_policy<anyimpl::empty_any>()), object(NULL) : policy(anyimpl::SinglePolicy<anyimpl::empty_any>::get_policy()), object(NULL)
{ {
assign(x); assign(x);
} }
/// Copy constructor. /// Copy constructor.
any(const any& x) any(const any& x)
: policy(anyimpl::get_policy<anyimpl::empty_any>()), object(NULL) : policy(anyimpl::SinglePolicy<anyimpl::empty_any>::get_policy()), object(NULL)
{ {
assign(x); assign(x);
} }
@ -214,7 +234,7 @@ public:
any& assign(const T& x) any& assign(const T& x)
{ {
reset(); reset();
policy = anyimpl::get_policy<T>(); policy = anyimpl::SinglePolicy<T>::get_policy();
policy->copy_from_value(&x, &object); policy->copy_from_value(&x, &object);
return *this; return *this;
} }
@ -226,6 +246,12 @@ public:
return assign(x); return assign(x);
} }
/// Assignment operator. Template-based version above doesn't work as expected. We need regular assignment operator here.
any& operator=(const any& x)
{
return assign(x);
}
/// Assignment operator, specialed for literal strings. /// Assignment operator, specialed for literal strings.
/// They have types like const char [6] which don't work as expected. /// They have types like const char [6] which don't work as expected.
any& operator=(const char* x) any& operator=(const char* x)
@ -255,7 +281,7 @@ public:
const T& cast() const const T& cast() const
{ {
if (policy->type() != typeid(T)) throw anyimpl::bad_any_cast(); if (policy->type() != typeid(T)) throw anyimpl::bad_any_cast();
T* r = reinterpret_cast<T*>(policy->get_value(const_cast<void **>(&object))); const T* r = reinterpret_cast<const T*>(policy->get_value(&object));
return *r; return *r;
} }
@ -269,7 +295,7 @@ public:
void reset() void reset()
{ {
policy->static_delete(&object); policy->static_delete(&object);
policy = anyimpl::get_policy<anyimpl::empty_any>(); policy = anyimpl::SinglePolicy<anyimpl::empty_any>::get_policy();
} }
/// Returns true if the two types are the same. /// Returns true if the two types are the same.

View File

@ -30,6 +30,8 @@
#ifndef OPENCV_FLANN_AUTOTUNED_INDEX_H_ #ifndef OPENCV_FLANN_AUTOTUNED_INDEX_H_
#define OPENCV_FLANN_AUTOTUNED_INDEX_H_ #define OPENCV_FLANN_AUTOTUNED_INDEX_H_
#include <sstream>
#include "general.h" #include "general.h"
#include "nn_index.h" #include "nn_index.h"
#include "ground_truth.h" #include "ground_truth.h"
@ -81,6 +83,7 @@ public:
memory_weight_ = get_param(params, "memory_weight", 0.0f); memory_weight_ = get_param(params, "memory_weight", 0.0f);
sample_fraction_ = get_param(params,"sample_fraction", 0.1f); sample_fraction_ = get_param(params,"sample_fraction", 0.1f);
bestIndex_ = NULL; bestIndex_ = NULL;
speedup_ = 0;
} }
AutotunedIndex(const AutotunedIndex&); AutotunedIndex(const AutotunedIndex&);
@ -97,7 +100,7 @@ public:
/** /**
* Method responsible with building the index. * Method responsible with building the index.
*/ */
virtual void buildIndex() virtual void buildIndex() CV_OVERRIDE
{ {
std::ostringstream stream; std::ostringstream stream;
bestParams_ = estimateBuildParams(); bestParams_ = estimateBuildParams();
@ -121,7 +124,7 @@ public:
/** /**
* Saves the index to a stream * Saves the index to a stream
*/ */
virtual void saveIndex(FILE* stream) virtual void saveIndex(FILE* stream) CV_OVERRIDE
{ {
save_value(stream, (int)bestIndex_->getType()); save_value(stream, (int)bestIndex_->getType());
bestIndex_->saveIndex(stream); bestIndex_->saveIndex(stream);
@ -131,7 +134,7 @@ public:
/** /**
* Loads the index from a stream * Loads the index from a stream
*/ */
virtual void loadIndex(FILE* stream) virtual void loadIndex(FILE* stream) CV_OVERRIDE
{ {
int index_type; int index_type;
@ -148,7 +151,7 @@ public:
/** /**
* Method that searches for nearest-neighbors * Method that searches for nearest-neighbors
*/ */
virtual void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) virtual void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) CV_OVERRIDE
{ {
int checks = get_param<int>(searchParams,"checks",FLANN_CHECKS_AUTOTUNED); int checks = get_param<int>(searchParams,"checks",FLANN_CHECKS_AUTOTUNED);
if (checks == FLANN_CHECKS_AUTOTUNED) { if (checks == FLANN_CHECKS_AUTOTUNED) {
@ -160,7 +163,7 @@ public:
} }
IndexParams getParameters() const IndexParams getParameters() const CV_OVERRIDE
{ {
return bestIndex_->getParameters(); return bestIndex_->getParameters();
} }
@ -179,7 +182,7 @@ public:
/** /**
* Number of features in this index. * Number of features in this index.
*/ */
virtual size_t size() const virtual size_t size() const CV_OVERRIDE
{ {
return bestIndex_->size(); return bestIndex_->size();
} }
@ -187,7 +190,7 @@ public:
/** /**
* The length of each vector in this index. * The length of each vector in this index.
*/ */
virtual size_t veclen() const virtual size_t veclen() const CV_OVERRIDE
{ {
return bestIndex_->veclen(); return bestIndex_->veclen();
} }
@ -195,7 +198,7 @@ public:
/** /**
* The amount of memory (in bytes) this index uses. * The amount of memory (in bytes) this index uses.
*/ */
virtual int usedMemory() const virtual int usedMemory() const CV_OVERRIDE
{ {
return bestIndex_->usedMemory(); return bestIndex_->usedMemory();
} }
@ -203,7 +206,7 @@ public:
/** /**
* Algorithm name * Algorithm name
*/ */
virtual flann_algorithm_t getType() const virtual flann_algorithm_t getType() const CV_OVERRIDE
{ {
return FLANN_INDEX_AUTOTUNED; return FLANN_INDEX_AUTOTUNED;
} }
@ -274,7 +277,7 @@ private:
// struct KMeansSimpleDownhillFunctor { // struct KMeansSimpleDownhillFunctor {
// //
// Autotune& autotuner; // Autotune& autotuner;
// KMeansSimpleDownhillFunctor(Autotune& autotuner_) : autotuner(autotuner_) {}; // KMeansSimpleDownhillFunctor(Autotune& autotuner_) : autotuner(autotuner_) {}
// //
// float operator()(int* params) { // float operator()(int* params) {
// //
@ -299,7 +302,7 @@ private:
// struct KDTreeSimpleDownhillFunctor { // struct KDTreeSimpleDownhillFunctor {
// //
// Autotune& autotuner; // Autotune& autotuner;
// KDTreeSimpleDownhillFunctor(Autotune& autotuner_) : autotuner(autotuner_) {}; // KDTreeSimpleDownhillFunctor(Autotune& autotuner_) : autotuner(autotuner_) {}
// //
// float operator()(int* params) { // float operator()(int* params) {
// float maxFloat = numeric_limits<float>::max(); // float maxFloat = numeric_limits<float>::max();
@ -377,6 +380,7 @@ private:
// evaluate kdtree for all parameter combinations // evaluate kdtree for all parameter combinations
for (size_t i = 0; i < FLANN_ARRAY_LEN(testTrees); ++i) { for (size_t i = 0; i < FLANN_ARRAY_LEN(testTrees); ++i) {
CostData cost; CostData cost;
cost.params["algorithm"] = FLANN_INDEX_KDTREE;
cost.params["trees"] = testTrees[i]; cost.params["trees"] = testTrees[i];
evaluate_kdtree(cost); evaluate_kdtree(cost);

View File

@ -101,7 +101,7 @@ public:
/** /**
* @return The index type * @return The index type
*/ */
flann_algorithm_t getType() const flann_algorithm_t getType() const CV_OVERRIDE
{ {
return FLANN_INDEX_COMPOSITE; return FLANN_INDEX_COMPOSITE;
} }
@ -109,7 +109,7 @@ public:
/** /**
* @return Size of the index * @return Size of the index
*/ */
size_t size() const size_t size() const CV_OVERRIDE
{ {
return kdtree_index_->size(); return kdtree_index_->size();
} }
@ -117,7 +117,7 @@ public:
/** /**
* \returns The dimensionality of the features in this index. * \returns The dimensionality of the features in this index.
*/ */
size_t veclen() const size_t veclen() const CV_OVERRIDE
{ {
return kdtree_index_->veclen(); return kdtree_index_->veclen();
} }
@ -125,7 +125,7 @@ public:
/** /**
* \returns The amount of memory (in bytes) used by the index. * \returns The amount of memory (in bytes) used by the index.
*/ */
int usedMemory() const int usedMemory() const CV_OVERRIDE
{ {
return kmeans_index_->usedMemory() + kdtree_index_->usedMemory(); return kmeans_index_->usedMemory() + kdtree_index_->usedMemory();
} }
@ -133,7 +133,7 @@ public:
/** /**
* \brief Builds the index * \brief Builds the index
*/ */
void buildIndex() void buildIndex() CV_OVERRIDE
{ {
Logger::info("Building kmeans tree...\n"); Logger::info("Building kmeans tree...\n");
kmeans_index_->buildIndex(); kmeans_index_->buildIndex();
@ -145,7 +145,7 @@ public:
* \brief Saves the index to a stream * \brief Saves the index to a stream
* \param stream The stream to save the index to * \param stream The stream to save the index to
*/ */
void saveIndex(FILE* stream) void saveIndex(FILE* stream) CV_OVERRIDE
{ {
kmeans_index_->saveIndex(stream); kmeans_index_->saveIndex(stream);
kdtree_index_->saveIndex(stream); kdtree_index_->saveIndex(stream);
@ -155,7 +155,7 @@ public:
* \brief Loads the index from a stream * \brief Loads the index from a stream
* \param stream The stream from which the index is loaded * \param stream The stream from which the index is loaded
*/ */
void loadIndex(FILE* stream) void loadIndex(FILE* stream) CV_OVERRIDE
{ {
kmeans_index_->loadIndex(stream); kmeans_index_->loadIndex(stream);
kdtree_index_->loadIndex(stream); kdtree_index_->loadIndex(stream);
@ -164,7 +164,7 @@ public:
/** /**
* \returns The index parameters * \returns The index parameters
*/ */
IndexParams getParameters() const IndexParams getParameters() const CV_OVERRIDE
{ {
return index_params_; return index_params_;
} }
@ -172,7 +172,7 @@ public:
/** /**
* \brief Method that searches for nearest-neighbours * \brief Method that searches for nearest-neighbours
*/ */
void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) CV_OVERRIDE
{ {
kmeans_index_->findNeighbors(result, vec, searchParams); kmeans_index_->findNeighbors(result, vec, searchParams);
kdtree_index_->findNeighbors(result, vec, searchParams); kdtree_index_->findNeighbors(result, vec, searchParams);

View File

@ -35,7 +35,7 @@
#ifdef FLANN_EXPORT #ifdef FLANN_EXPORT
#undef FLANN_EXPORT #undef FLANN_EXPORT
#endif #endif
#ifdef WIN32 #ifdef _WIN32
/* win32 dll export/import directives */ /* win32 dll export/import directives */
#ifdef FLANN_EXPORTS #ifdef FLANN_EXPORTS
#define FLANN_EXPORT __declspec(dllexport) #define FLANN_EXPORT __declspec(dllexport)
@ -50,19 +50,6 @@
#endif #endif
#ifdef FLANN_DEPRECATED
#undef FLANN_DEPRECATED
#endif
#ifdef __GNUC__
#define FLANN_DEPRECATED __attribute__ ((deprecated))
#elif defined(_MSC_VER)
#define FLANN_DEPRECATED __declspec(deprecated)
#else
#pragma message("WARNING: You need to implement FLANN_DEPRECATED for this compiler")
#define FLANN_DEPRECATED
#endif
#undef FLANN_PLATFORM_32_BIT #undef FLANN_PLATFORM_32_BIT
#undef FLANN_PLATFORM_64_BIT #undef FLANN_PLATFORM_64_BIT
#if defined __amd64__ || defined __x86_64__ || defined _WIN64 || defined _M_X64 #if defined __amd64__ || defined __x86_64__ || defined _WIN64 || defined _M_X64
@ -107,6 +94,7 @@ enum flann_centers_init_t
FLANN_CENTERS_RANDOM = 0, FLANN_CENTERS_RANDOM = 0,
FLANN_CENTERS_GONZALES = 1, FLANN_CENTERS_GONZALES = 1,
FLANN_CENTERS_KMEANSPP = 2, FLANN_CENTERS_KMEANSPP = 2,
FLANN_CENTERS_GROUPWISE = 3,
// deprecated constants, should use the FLANN_CENTERS_* ones instead // deprecated constants, should use the FLANN_CENTERS_* ones instead
CENTERS_RANDOM = 0, CENTERS_RANDOM = 0,

View File

@ -43,11 +43,11 @@ typedef unsigned __int64 uint64_t;
#include "defines.h" #include "defines.h"
#if (defined WIN32 || defined _WIN32) && defined(_M_ARM) #if defined _WIN32 && defined(_M_ARM)
# include <Intrin.h> # include <Intrin.h>
#endif #endif
#ifdef __ARM_NEON__ #if defined(__ARM_NEON__) && !defined(__CUDACC__)
# include "arm_neon.h" # include "arm_neon.h"
#endif #endif
@ -384,41 +384,6 @@ struct HammingLUT
typedef unsigned char ElementType; typedef unsigned char ElementType;
typedef int ResultType; typedef int ResultType;
/** this will count the bits in a ^ b
*/
ResultType operator()(const unsigned char* a, const unsigned char* b, int size) const
{
static const uchar popCountTable[] =
{
0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
};
ResultType result = 0;
for (int i = 0; i < size; i++) {
result += popCountTable[a[i] ^ b[i]];
}
return result;
}
};
/**
* Hamming distance functor - counts the bit differences between two strings - useful for the Brief descriptor
* bit count of A exclusive XOR'ed with B
*/
struct HammingLUT2
{
typedef False is_kdtree_distance;
typedef False is_vector_space_distance;
typedef unsigned char ElementType;
typedef int ResultType;
/** this will count the bits in a ^ b /** this will count the bits in a ^ b
*/ */
ResultType operator()(const unsigned char* a, const unsigned char* b, size_t size) const ResultType operator()(const unsigned char* a, const unsigned char* b, size_t size) const
@ -460,7 +425,7 @@ struct Hamming
ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const
{ {
ResultType result = 0; ResultType result = 0;
#ifdef __ARM_NEON__ #if defined(__ARM_NEON__) && !defined(__CUDACC__)
{ {
uint32x4_t bits = vmovq_n_u32(0); uint32x4_t bits = vmovq_n_u32(0);
for (size_t i = 0; i < size; i += 16) { for (size_t i = 0; i < size; i += 16) {
@ -497,10 +462,9 @@ struct Hamming
} }
} }
#else // NO NEON and NOT GNUC #else // NO NEON and NOT GNUC
typedef unsigned long long pop_t;
HammingLUT lut; HammingLUT lut;
result = lut(reinterpret_cast<const unsigned char*> (a), result = lut(reinterpret_cast<const unsigned char*> (a),
reinterpret_cast<const unsigned char*> (b), size * sizeof(pop_t)); reinterpret_cast<const unsigned char*> (b), size);
#endif #endif
return result; return result;
} }
@ -630,7 +594,7 @@ struct HellingerDistance
typedef typename Accumulator<T>::Type ResultType; typedef typename Accumulator<T>::Type ResultType;
/** /**
* Compute the histogram intersection distance * Compute the Hellinger distance
*/ */
template <typename Iterator1, typename Iterator2> template <typename Iterator1, typename Iterator2>
ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const
@ -663,7 +627,8 @@ struct HellingerDistance
template <typename U, typename V> template <typename U, typename V>
inline ResultType accum_dist(const U& a, const V& b, int) const inline ResultType accum_dist(const U& a, const V& b, int) const
{ {
return sqrt(static_cast<ResultType>(a)) - sqrt(static_cast<ResultType>(b)); ResultType diff = sqrt(static_cast<ResultType>(a)) - sqrt(static_cast<ResultType>(b));
return diff * diff;
} }
}; };
@ -732,7 +697,7 @@ struct KL_Divergence
typedef typename Accumulator<T>::Type ResultType; typedef typename Accumulator<T>::Type ResultType;
/** /**
* Compute the KullbackLeibler divergence * Compute the Kullback-Leibler divergence
*/ */
template <typename Iterator1, typename Iterator2> template <typename Iterator1, typename Iterator2>
ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const
@ -741,7 +706,7 @@ struct KL_Divergence
Iterator1 last = a + size; Iterator1 last = a + size;
while (a < last) { while (a < last) {
if (* a != 0) { if (* b != 0) {
ResultType ratio = (ResultType)(*a / *b); ResultType ratio = (ResultType)(*a / *b);
if (ratio>0) { if (ratio>0) {
result += *a * log(ratio); result += *a * log(ratio);
@ -764,9 +729,11 @@ struct KL_Divergence
inline ResultType accum_dist(const U& a, const V& b, int) const inline ResultType accum_dist(const U& a, const V& b, int) const
{ {
ResultType result = ResultType(); ResultType result = ResultType();
ResultType ratio = (ResultType)(a / b); if( *b != 0 ) {
if (ratio>0) { ResultType ratio = (ResultType)(a / b);
result = a * log(ratio); if (ratio>0) {
result = a * log(ratio);
}
} }
return result; return result;
} }
@ -875,7 +842,7 @@ typename Distance::ResultType ensureSquareDistance( typename Distance::ResultTyp
/* /*
* ...and a template to ensure the user that he will process the normal distance, * ...and a template to ensure the user that he will process the normal distance,
* and not squared distance, without loosing processing time calling sqrt(ensureSquareDistance) * and not squared distance, without losing processing time calling sqrt(ensureSquareDistance)
* that will result in doing actually sqrt(dist*dist) for L1 distance for instance. * that will result in doing actually sqrt(dist*dist) for L1 distance for instance.
*/ */
template <typename Distance, typename ElementType> template <typename Distance, typename ElementType>

View File

@ -5,10 +5,7 @@
namespace cvflann namespace cvflann
{ {
#if (defined WIN32 || defined _WIN32 || defined WINCE) && defined CVAPI_EXPORTS CV_DEPRECATED inline void dummyfunc() {}
__declspec(dllexport)
#endif
void dummyfunc();
} }

View File

@ -57,14 +57,14 @@ namespace cvflann {
class DynamicBitset class DynamicBitset
{ {
public: public:
/** @param default constructor /** default constructor
*/ */
DynamicBitset() DynamicBitset() : size_(0)
{ {
} }
/** @param only constructor we use in our code /** only constructor we use in our code
* @param the size of the bitset (in bits) * @param sz the size of the bitset (in bits)
*/ */
DynamicBitset(size_t sz) DynamicBitset(size_t sz)
{ {
@ -87,7 +87,7 @@ public:
return bitset_.empty(); return bitset_.empty();
} }
/** @param set all the bits to 0 /** set all the bits to 0
*/ */
void reset() void reset()
{ {
@ -95,7 +95,7 @@ public:
} }
/** @brief set one bit to 0 /** @brief set one bit to 0
* @param * @param index
*/ */
void reset(size_t index) void reset(size_t index)
{ {
@ -106,15 +106,15 @@ public:
* This function is useful when resetting a given set of bits so that the * This function is useful when resetting a given set of bits so that the
* whole bitset ends up being 0: if that's the case, we don't care about setting * whole bitset ends up being 0: if that's the case, we don't care about setting
* other bits to 0 * other bits to 0
* @param * @param index
*/ */
void reset_block(size_t index) void reset_block(size_t index)
{ {
bitset_[index / cell_bit_size_] = 0; bitset_[index / cell_bit_size_] = 0;
} }
/** @param resize the bitset so that it contains at least size bits /** resize the bitset so that it contains at least sz bits
* @param size * @param sz
*/ */
void resize(size_t sz) void resize(size_t sz)
{ {
@ -122,7 +122,7 @@ public:
bitset_.resize(sz / cell_bit_size_ + 1); bitset_.resize(sz / cell_bit_size_ + 1);
} }
/** @param set a bit to true /** set a bit to true
* @param index the index of the bit to set to 1 * @param index the index of the bit to set to 1
*/ */
void set(size_t index) void set(size_t index)
@ -130,14 +130,14 @@ public:
bitset_[index / cell_bit_size_] |= size_t(1) << (index % cell_bit_size_); bitset_[index / cell_bit_size_] |= size_t(1) << (index % cell_bit_size_);
} }
/** @param gives the number of contained bits /** gives the number of contained bits
*/ */
size_t size() const size_t size() const
{ {
return size_; return size_;
} }
/** @param check if a bit is set /** check if a bit is set
* @param index the index of the bit to check * @param index the index of the bit to check
* @return true if the bit is set * @return true if the bit is set
*/ */

View File

@ -7,11 +7,12 @@
// copy or use the software. // copy or use the software.
// //
// //
// License Agreement // License Agreement
// For Open Source Computer Vision Library // For Open Source Computer Vision Library
// //
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// //
// Redistribution and use in source and binary forms, with or without modification, // Redistribution and use in source and binary forms, with or without modification,
@ -40,388 +41,8 @@
// //
//M*/ //M*/
#ifndef _OPENCV_FLANN_HPP_ #ifdef __OPENCV_BUILD
#define _OPENCV_FLANN_HPP_ #error this is a compatibility header which should not be used inside the OpenCV library
#ifdef __cplusplus
#include "opencv2/core/types_c.h"
#include "opencv2/core/core.hpp"
#include "opencv2/flann/flann_base.hpp"
#include "opencv2/flann/miniflann.hpp"
namespace cvflann
{
CV_EXPORTS flann_distance_t flann_distance_type();
FLANN_DEPRECATED CV_EXPORTS void set_distance_type(flann_distance_t distance_type, int order);
}
namespace cv
{
namespace flann
{
template <typename T> struct CvType {};
template <> struct CvType<unsigned char> { static int type() { return CV_8U; } };
template <> struct CvType<char> { static int type() { return CV_8S; } };
template <> struct CvType<unsigned short> { static int type() { return CV_16U; } };
template <> struct CvType<short> { static int type() { return CV_16S; } };
template <> struct CvType<int> { static int type() { return CV_32S; } };
template <> struct CvType<float> { static int type() { return CV_32F; } };
template <> struct CvType<double> { static int type() { return CV_64F; } };
// bring the flann parameters into this namespace
using ::cvflann::get_param;
using ::cvflann::print_params;
// bring the flann distances into this namespace
using ::cvflann::L2_Simple;
using ::cvflann::L2;
using ::cvflann::L1;
using ::cvflann::MinkowskiDistance;
using ::cvflann::MaxDistance;
using ::cvflann::HammingLUT;
using ::cvflann::Hamming;
using ::cvflann::Hamming2;
using ::cvflann::HistIntersectionDistance;
using ::cvflann::HellingerDistance;
using ::cvflann::ChiSquareDistance;
using ::cvflann::KL_Divergence;
template <typename Distance>
class GenericIndex
{
public:
typedef typename Distance::ElementType ElementType;
typedef typename Distance::ResultType DistanceType;
GenericIndex(const Mat& features, const ::cvflann::IndexParams& params, Distance distance = Distance());
~GenericIndex();
void knnSearch(const vector<ElementType>& query, vector<int>& indices,
vector<DistanceType>& dists, int knn, const ::cvflann::SearchParams& params);
void knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const ::cvflann::SearchParams& params);
int radiusSearch(const vector<ElementType>& query, vector<int>& indices,
vector<DistanceType>& dists, DistanceType radius, const ::cvflann::SearchParams& params);
int radiusSearch(const Mat& query, Mat& indices, Mat& dists,
DistanceType radius, const ::cvflann::SearchParams& params);
void save(std::string filename) { nnIndex->save(filename); }
int veclen() const { return nnIndex->veclen(); }
int size() const { return nnIndex->size(); }
::cvflann::IndexParams getParameters() { return nnIndex->getParameters(); }
FLANN_DEPRECATED const ::cvflann::IndexParams* getIndexParameters() { return nnIndex->getIndexParameters(); }
private:
::cvflann::Index<Distance>* nnIndex;
};
#define FLANN_DISTANCE_CHECK \
if ( ::cvflann::flann_distance_type() != cvflann::FLANN_DIST_L2) { \
printf("[WARNING] You are using cv::flann::Index (or cv::flann::GenericIndex) and have also changed "\
"the distance using cvflann::set_distance_type. This is no longer working as expected "\
"(cv::flann::Index always uses L2). You should create the index templated on the distance, "\
"for example for L1 distance use: GenericIndex< L1<float> > \n"); \
}
template <typename Distance>
GenericIndex<Distance>::GenericIndex(const Mat& dataset, const ::cvflann::IndexParams& params, Distance distance)
{
CV_Assert(dataset.type() == CvType<ElementType>::type());
CV_Assert(dataset.isContinuous());
::cvflann::Matrix<ElementType> m_dataset((ElementType*)dataset.ptr<ElementType>(0), dataset.rows, dataset.cols);
nnIndex = new ::cvflann::Index<Distance>(m_dataset, params, distance);
FLANN_DISTANCE_CHECK
nnIndex->buildIndex();
}
template <typename Distance>
GenericIndex<Distance>::~GenericIndex()
{
delete nnIndex;
}
template <typename Distance>
void GenericIndex<Distance>::knnSearch(const vector<ElementType>& query, vector<int>& indices, vector<DistanceType>& dists, int knn, const ::cvflann::SearchParams& searchParams)
{
::cvflann::Matrix<ElementType> m_query((ElementType*)&query[0], 1, query.size());
::cvflann::Matrix<int> m_indices(&indices[0], 1, indices.size());
::cvflann::Matrix<DistanceType> m_dists(&dists[0], 1, dists.size());
FLANN_DISTANCE_CHECK
nnIndex->knnSearch(m_query,m_indices,m_dists,knn,searchParams);
}
template <typename Distance>
void GenericIndex<Distance>::knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const ::cvflann::SearchParams& searchParams)
{
CV_Assert(queries.type() == CvType<ElementType>::type());
CV_Assert(queries.isContinuous());
::cvflann::Matrix<ElementType> m_queries((ElementType*)queries.ptr<ElementType>(0), queries.rows, queries.cols);
CV_Assert(indices.type() == CV_32S);
CV_Assert(indices.isContinuous());
::cvflann::Matrix<int> m_indices((int*)indices.ptr<int>(0), indices.rows, indices.cols);
CV_Assert(dists.type() == CvType<DistanceType>::type());
CV_Assert(dists.isContinuous());
::cvflann::Matrix<DistanceType> m_dists((DistanceType*)dists.ptr<DistanceType>(0), dists.rows, dists.cols);
FLANN_DISTANCE_CHECK
nnIndex->knnSearch(m_queries,m_indices,m_dists,knn, searchParams);
}
template <typename Distance>
int GenericIndex<Distance>::radiusSearch(const vector<ElementType>& query, vector<int>& indices, vector<DistanceType>& dists, DistanceType radius, const ::cvflann::SearchParams& searchParams)
{
::cvflann::Matrix<ElementType> m_query((ElementType*)&query[0], 1, query.size());
::cvflann::Matrix<int> m_indices(&indices[0], 1, indices.size());
::cvflann::Matrix<DistanceType> m_dists(&dists[0], 1, dists.size());
FLANN_DISTANCE_CHECK
return nnIndex->radiusSearch(m_query,m_indices,m_dists,radius,searchParams);
}
template <typename Distance>
int GenericIndex<Distance>::radiusSearch(const Mat& query, Mat& indices, Mat& dists, DistanceType radius, const ::cvflann::SearchParams& searchParams)
{
CV_Assert(query.type() == CvType<ElementType>::type());
CV_Assert(query.isContinuous());
::cvflann::Matrix<ElementType> m_query((ElementType*)query.ptr<ElementType>(0), query.rows, query.cols);
CV_Assert(indices.type() == CV_32S);
CV_Assert(indices.isContinuous());
::cvflann::Matrix<int> m_indices((int*)indices.ptr<int>(0), indices.rows, indices.cols);
CV_Assert(dists.type() == CvType<DistanceType>::type());
CV_Assert(dists.isContinuous());
::cvflann::Matrix<DistanceType> m_dists((DistanceType*)dists.ptr<DistanceType>(0), dists.rows, dists.cols);
FLANN_DISTANCE_CHECK
return nnIndex->radiusSearch(m_query,m_indices,m_dists,radius,searchParams);
}
/**
* @deprecated Use GenericIndex class instead
*/
template <typename T>
class
#ifndef _MSC_VER
FLANN_DEPRECATED
#endif
Index_ {
public:
typedef typename L2<T>::ElementType ElementType;
typedef typename L2<T>::ResultType DistanceType;
Index_(const Mat& features, const ::cvflann::IndexParams& params);
~Index_();
void knnSearch(const vector<ElementType>& query, vector<int>& indices, vector<DistanceType>& dists, int knn, const ::cvflann::SearchParams& params);
void knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const ::cvflann::SearchParams& params);
int radiusSearch(const vector<ElementType>& query, vector<int>& indices, vector<DistanceType>& dists, DistanceType radius, const ::cvflann::SearchParams& params);
int radiusSearch(const Mat& query, Mat& indices, Mat& dists, DistanceType radius, const ::cvflann::SearchParams& params);
void save(std::string filename)
{
if (nnIndex_L1) nnIndex_L1->save(filename);
if (nnIndex_L2) nnIndex_L2->save(filename);
}
int veclen() const
{
if (nnIndex_L1) return nnIndex_L1->veclen();
if (nnIndex_L2) return nnIndex_L2->veclen();
}
int size() const
{
if (nnIndex_L1) return nnIndex_L1->size();
if (nnIndex_L2) return nnIndex_L2->size();
}
::cvflann::IndexParams getParameters()
{
if (nnIndex_L1) return nnIndex_L1->getParameters();
if (nnIndex_L2) return nnIndex_L2->getParameters();
}
FLANN_DEPRECATED const ::cvflann::IndexParams* getIndexParameters()
{
if (nnIndex_L1) return nnIndex_L1->getIndexParameters();
if (nnIndex_L2) return nnIndex_L2->getIndexParameters();
}
private:
// providing backwards compatibility for L2 and L1 distances (most common)
::cvflann::Index< L2<ElementType> >* nnIndex_L2;
::cvflann::Index< L1<ElementType> >* nnIndex_L1;
};
#ifdef _MSC_VER
template <typename T>
class FLANN_DEPRECATED Index_;
#endif #endif
template <typename T> #include "opencv2/flann.hpp"
Index_<T>::Index_(const Mat& dataset, const ::cvflann::IndexParams& params)
{
printf("[WARNING] The cv::flann::Index_<T> class is deperecated, use cv::flann::GenericIndex<Distance> instead\n");
CV_Assert(dataset.type() == CvType<ElementType>::type());
CV_Assert(dataset.isContinuous());
::cvflann::Matrix<ElementType> m_dataset((ElementType*)dataset.ptr<ElementType>(0), dataset.rows, dataset.cols);
if ( ::cvflann::flann_distance_type() == cvflann::FLANN_DIST_L2 ) {
nnIndex_L1 = NULL;
nnIndex_L2 = new ::cvflann::Index< L2<ElementType> >(m_dataset, params);
}
else if ( ::cvflann::flann_distance_type() == cvflann::FLANN_DIST_L1 ) {
nnIndex_L1 = new ::cvflann::Index< L1<ElementType> >(m_dataset, params);
nnIndex_L2 = NULL;
}
else {
printf("[ERROR] cv::flann::Index_<T> only provides backwards compatibility for the L1 and L2 distances. "
"For other distance types you must use cv::flann::GenericIndex<Distance>\n");
CV_Assert(0);
}
if (nnIndex_L1) nnIndex_L1->buildIndex();
if (nnIndex_L2) nnIndex_L2->buildIndex();
}
template <typename T>
Index_<T>::~Index_()
{
if (nnIndex_L1) delete nnIndex_L1;
if (nnIndex_L2) delete nnIndex_L2;
}
template <typename T>
void Index_<T>::knnSearch(const vector<ElementType>& query, vector<int>& indices, vector<DistanceType>& dists, int knn, const ::cvflann::SearchParams& searchParams)
{
::cvflann::Matrix<ElementType> m_query((ElementType*)&query[0], 1, query.size());
::cvflann::Matrix<int> m_indices(&indices[0], 1, indices.size());
::cvflann::Matrix<DistanceType> m_dists(&dists[0], 1, dists.size());
if (nnIndex_L1) nnIndex_L1->knnSearch(m_query,m_indices,m_dists,knn,searchParams);
if (nnIndex_L2) nnIndex_L2->knnSearch(m_query,m_indices,m_dists,knn,searchParams);
}
template <typename T>
void Index_<T>::knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const ::cvflann::SearchParams& searchParams)
{
CV_Assert(queries.type() == CvType<ElementType>::type());
CV_Assert(queries.isContinuous());
::cvflann::Matrix<ElementType> m_queries((ElementType*)queries.ptr<ElementType>(0), queries.rows, queries.cols);
CV_Assert(indices.type() == CV_32S);
CV_Assert(indices.isContinuous());
::cvflann::Matrix<int> m_indices((int*)indices.ptr<int>(0), indices.rows, indices.cols);
CV_Assert(dists.type() == CvType<DistanceType>::type());
CV_Assert(dists.isContinuous());
::cvflann::Matrix<DistanceType> m_dists((DistanceType*)dists.ptr<DistanceType>(0), dists.rows, dists.cols);
if (nnIndex_L1) nnIndex_L1->knnSearch(m_queries,m_indices,m_dists,knn, searchParams);
if (nnIndex_L2) nnIndex_L2->knnSearch(m_queries,m_indices,m_dists,knn, searchParams);
}
template <typename T>
int Index_<T>::radiusSearch(const vector<ElementType>& query, vector<int>& indices, vector<DistanceType>& dists, DistanceType radius, const ::cvflann::SearchParams& searchParams)
{
::cvflann::Matrix<ElementType> m_query((ElementType*)&query[0], 1, query.size());
::cvflann::Matrix<int> m_indices(&indices[0], 1, indices.size());
::cvflann::Matrix<DistanceType> m_dists(&dists[0], 1, dists.size());
if (nnIndex_L1) return nnIndex_L1->radiusSearch(m_query,m_indices,m_dists,radius,searchParams);
if (nnIndex_L2) return nnIndex_L2->radiusSearch(m_query,m_indices,m_dists,radius,searchParams);
}
template <typename T>
int Index_<T>::radiusSearch(const Mat& query, Mat& indices, Mat& dists, DistanceType radius, const ::cvflann::SearchParams& searchParams)
{
CV_Assert(query.type() == CvType<ElementType>::type());
CV_Assert(query.isContinuous());
::cvflann::Matrix<ElementType> m_query((ElementType*)query.ptr<ElementType>(0), query.rows, query.cols);
CV_Assert(indices.type() == CV_32S);
CV_Assert(indices.isContinuous());
::cvflann::Matrix<int> m_indices((int*)indices.ptr<int>(0), indices.rows, indices.cols);
CV_Assert(dists.type() == CvType<DistanceType>::type());
CV_Assert(dists.isContinuous());
::cvflann::Matrix<DistanceType> m_dists((DistanceType*)dists.ptr<DistanceType>(0), dists.rows, dists.cols);
if (nnIndex_L1) return nnIndex_L1->radiusSearch(m_query,m_indices,m_dists,radius,searchParams);
if (nnIndex_L2) return nnIndex_L2->radiusSearch(m_query,m_indices,m_dists,radius,searchParams);
}
template <typename Distance>
int hierarchicalClustering(const Mat& features, Mat& centers, const ::cvflann::KMeansIndexParams& params,
Distance d = Distance())
{
typedef typename Distance::ElementType ElementType;
typedef typename Distance::ResultType DistanceType;
CV_Assert(features.type() == CvType<ElementType>::type());
CV_Assert(features.isContinuous());
::cvflann::Matrix<ElementType> m_features((ElementType*)features.ptr<ElementType>(0), features.rows, features.cols);
CV_Assert(centers.type() == CvType<DistanceType>::type());
CV_Assert(centers.isContinuous());
::cvflann::Matrix<DistanceType> m_centers((DistanceType*)centers.ptr<DistanceType>(0), centers.rows, centers.cols);
return ::cvflann::hierarchicalClustering<Distance>(m_features, m_centers, params, d);
}
template <typename ELEM_TYPE, typename DIST_TYPE>
FLANN_DEPRECATED int hierarchicalClustering(const Mat& features, Mat& centers, const ::cvflann::KMeansIndexParams& params)
{
printf("[WARNING] cv::flann::hierarchicalClustering<ELEM_TYPE,DIST_TYPE> is deprecated, use "
"cv::flann::hierarchicalClustering<Distance> instead\n");
if ( ::cvflann::flann_distance_type() == cvflann::FLANN_DIST_L2 ) {
return hierarchicalClustering< L2<ELEM_TYPE> >(features, centers, params);
}
else if ( ::cvflann::flann_distance_type() == cvflann::FLANN_DIST_L1 ) {
return hierarchicalClustering< L1<ELEM_TYPE> >(features, centers, params);
}
else {
printf("[ERROR] cv::flann::hierarchicalClustering<ELEM_TYPE,DIST_TYPE> only provides backwards "
"compatibility for the L1 and L2 distances. "
"For other distance types you must use cv::flann::hierarchicalClustering<Distance>\n");
CV_Assert(0);
}
}
} } // namespace cv::flann
#endif // __cplusplus
#endif

View File

@ -32,7 +32,6 @@
#define OPENCV_FLANN_BASE_HPP_ #define OPENCV_FLANN_BASE_HPP_
#include <vector> #include <vector>
#include <string>
#include <cassert> #include <cassert>
#include <cstdio> #include <cstdio>
@ -62,7 +61,7 @@ inline void log_verbosity(int level)
*/ */
struct SavedIndexParams : public IndexParams struct SavedIndexParams : public IndexParams
{ {
SavedIndexParams(std::string filename) SavedIndexParams(cv::String filename)
{ {
(* this)["algorithm"] = FLANN_INDEX_SAVED; (* this)["algorithm"] = FLANN_INDEX_SAVED;
(*this)["filename"] = filename; (*this)["filename"] = filename;
@ -71,7 +70,7 @@ struct SavedIndexParams : public IndexParams
template<typename Distance> template<typename Distance>
NNIndex<Distance>* load_saved_index(const Matrix<typename Distance::ElementType>& dataset, const std::string& filename, Distance distance) NNIndex<Distance>* load_saved_index(const Matrix<typename Distance::ElementType>& dataset, const cv::String& filename, Distance distance)
{ {
typedef typename Distance::ElementType ElementType; typedef typename Distance::ElementType ElementType;
@ -81,9 +80,11 @@ NNIndex<Distance>* load_saved_index(const Matrix<typename Distance::ElementType>
} }
IndexHeader header = load_header(fin); IndexHeader header = load_header(fin);
if (header.data_type != Datatype<ElementType>::type()) { if (header.data_type != Datatype<ElementType>::type()) {
fclose(fin);
throw FLANNException("Datatype of saved index is different than of the one to be created."); throw FLANNException("Datatype of saved index is different than of the one to be created.");
} }
if ((size_t(header.rows) != dataset.rows)||(size_t(header.cols) != dataset.cols)) { if ((size_t(header.rows) != dataset.rows)||(size_t(header.cols) != dataset.cols)) {
fclose(fin);
throw FLANNException("The index saved belongs to a different dataset"); throw FLANNException("The index saved belongs to a different dataset");
} }
@ -111,7 +112,7 @@ public:
loaded_ = false; loaded_ = false;
if (index_type == FLANN_INDEX_SAVED) { if (index_type == FLANN_INDEX_SAVED) {
nnIndex_ = load_saved_index<Distance>(features, get_param<std::string>(params,"filename"), distance); nnIndex_ = load_saved_index<Distance>(features, get_param<cv::String>(params,"filename"), distance);
loaded_ = true; loaded_ = true;
} }
else { else {
@ -127,14 +128,14 @@ public:
/** /**
* Builds the index. * Builds the index.
*/ */
void buildIndex() void buildIndex() CV_OVERRIDE
{ {
if (!loaded_) { if (!loaded_) {
nnIndex_->buildIndex(); nnIndex_->buildIndex();
} }
} }
void save(std::string filename) void save(cv::String filename)
{ {
FILE* fout = fopen(filename.c_str(), "wb"); FILE* fout = fopen(filename.c_str(), "wb");
if (fout == NULL) { if (fout == NULL) {
@ -149,7 +150,7 @@ public:
* \brief Saves the index to a stream * \brief Saves the index to a stream
* \param stream The stream to save the index to * \param stream The stream to save the index to
*/ */
virtual void saveIndex(FILE* stream) virtual void saveIndex(FILE* stream) CV_OVERRIDE
{ {
nnIndex_->saveIndex(stream); nnIndex_->saveIndex(stream);
} }
@ -158,7 +159,7 @@ public:
* \brief Loads the index from a stream * \brief Loads the index from a stream
* \param stream The stream from which the index is loaded * \param stream The stream from which the index is loaded
*/ */
virtual void loadIndex(FILE* stream) virtual void loadIndex(FILE* stream) CV_OVERRIDE
{ {
nnIndex_->loadIndex(stream); nnIndex_->loadIndex(stream);
} }
@ -166,7 +167,7 @@ public:
/** /**
* \returns number of features in this index. * \returns number of features in this index.
*/ */
size_t veclen() const size_t veclen() const CV_OVERRIDE
{ {
return nnIndex_->veclen(); return nnIndex_->veclen();
} }
@ -174,7 +175,7 @@ public:
/** /**
* \returns The dimensionality of the features in this index. * \returns The dimensionality of the features in this index.
*/ */
size_t size() const size_t size() const CV_OVERRIDE
{ {
return nnIndex_->size(); return nnIndex_->size();
} }
@ -182,7 +183,7 @@ public:
/** /**
* \returns The index type (kdtree, kmeans,...) * \returns The index type (kdtree, kmeans,...)
*/ */
flann_algorithm_t getType() const flann_algorithm_t getType() const CV_OVERRIDE
{ {
return nnIndex_->getType(); return nnIndex_->getType();
} }
@ -190,7 +191,7 @@ public:
/** /**
* \returns The amount of memory (in bytes) used by the index. * \returns The amount of memory (in bytes) used by the index.
*/ */
virtual int usedMemory() const virtual int usedMemory() const CV_OVERRIDE
{ {
return nnIndex_->usedMemory(); return nnIndex_->usedMemory();
} }
@ -199,7 +200,7 @@ public:
/** /**
* \returns The index parameters * \returns The index parameters
*/ */
IndexParams getParameters() const IndexParams getParameters() const CV_OVERRIDE
{ {
return nnIndex_->getParameters(); return nnIndex_->getParameters();
} }
@ -212,7 +213,7 @@ public:
* \param[in] knn Number of nearest neighbors to return * \param[in] knn Number of nearest neighbors to return
* \param[in] params Search parameters * \param[in] params Search parameters
*/ */
void knnSearch(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, int knn, const SearchParams& params) void knnSearch(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, int knn, const SearchParams& params) CV_OVERRIDE
{ {
nnIndex_->knnSearch(queries, indices, dists, knn, params); nnIndex_->knnSearch(queries, indices, dists, knn, params);
} }
@ -226,7 +227,7 @@ public:
* \param[in] params Search parameters * \param[in] params Search parameters
* \returns Number of neighbors found * \returns Number of neighbors found
*/ */
int radiusSearch(const Matrix<ElementType>& query, Matrix<int>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params) int radiusSearch(const Matrix<ElementType>& query, Matrix<int>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params) CV_OVERRIDE
{ {
return nnIndex_->radiusSearch(query, indices, dists, radius, params); return nnIndex_->radiusSearch(query, indices, dists, radius, params);
} }
@ -234,7 +235,7 @@ public:
/** /**
* \brief Method that searches for nearest-neighbours * \brief Method that searches for nearest-neighbours
*/ */
void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) CV_OVERRIDE
{ {
nnIndex_->findNeighbors(result, vec, searchParams); nnIndex_->findNeighbors(result, vec, searchParams);
} }
@ -242,7 +243,7 @@ public:
/** /**
* \brief Returns actual index * \brief Returns actual index
*/ */
FLANN_DEPRECATED NNIndex<Distance>* getIndex() CV_DEPRECATED NNIndex<Distance>* getIndex()
{ {
return nnIndex_; return nnIndex_;
} }
@ -251,7 +252,7 @@ public:
* \brief Returns index parameters. * \brief Returns index parameters.
* \deprecated use getParameters() instead. * \deprecated use getParameters() instead.
*/ */
FLANN_DEPRECATED const IndexParams* getIndexParameters() CV_DEPRECATED const IndexParams* getIndexParameters()
{ {
return &index_params_; return &index_params_;
} }
@ -263,6 +264,9 @@ private:
bool loaded_; bool loaded_;
/** Parameters passed to the index */ /** Parameters passed to the index */
IndexParams index_params_; IndexParams index_params_;
Index(const Index &); // copy disabled
Index& operator=(const Index &); // assign disabled
}; };
/** /**

View File

@ -31,19 +31,17 @@
#ifndef OPENCV_FLANN_GENERAL_H_ #ifndef OPENCV_FLANN_GENERAL_H_
#define OPENCV_FLANN_GENERAL_H_ #define OPENCV_FLANN_GENERAL_H_
#include "defines.h" #include "opencv2/core.hpp"
#include <stdexcept>
#include <cassert>
namespace cvflann namespace cvflann
{ {
class FLANNException : public std::runtime_error class FLANNException : public cv::Exception
{ {
public: public:
FLANNException(const char* message) : std::runtime_error(message) { } FLANNException(const char* message) : cv::Exception(0, message, "", __FILE__, __LINE__) { }
FLANNException(const std::string& message) : std::runtime_error(message) { } FLANNException(const cv::String& message) : cv::Exception(0, message, "", __FILE__, __LINE__) { }
}; };
} }

View File

@ -73,7 +73,7 @@ hid_t get_hdf5_type<double>() { return H5T_NATIVE_DOUBLE; }
#define CHECK_ERROR(x,y) if ((x)<0) throw FLANNException((y)); #define CHECK_ERROR(x,y) if ((x)<0) throw FLANNException((y));
template<typename T> template<typename T>
void save_to_file(const cvflann::Matrix<T>& dataset, const std::string& filename, const std::string& name) void save_to_file(const cvflann::Matrix<T>& dataset, const String& filename, const String& name)
{ {
#if H5Eset_auto_vers == 2 #if H5Eset_auto_vers == 2
@ -125,7 +125,7 @@ void save_to_file(const cvflann::Matrix<T>& dataset, const std::string& filename
template<typename T> template<typename T>
void load_from_file(cvflann::Matrix<T>& dataset, const std::string& filename, const std::string& name) void load_from_file(cvflann::Matrix<T>& dataset, const String& filename, const String& name)
{ {
herr_t status; herr_t status;
hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT); hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);
@ -166,7 +166,7 @@ namespace mpi
* @param name Name of dataset inside file * @param name Name of dataset inside file
*/ */
template<typename T> template<typename T>
void load_from_file(cvflann::Matrix<T>& dataset, const std::string& filename, const std::string& name) void load_from_file(cvflann::Matrix<T>& dataset, const String& filename, const String& name)
{ {
MPI_Comm comm = MPI_COMM_WORLD; MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL; MPI_Info info = MPI_INFO_NULL;

View File

@ -32,7 +32,6 @@
#define OPENCV_FLANN_HIERARCHICAL_CLUSTERING_INDEX_H_ #define OPENCV_FLANN_HIERARCHICAL_CLUSTERING_INDEX_H_
#include <algorithm> #include <algorithm>
#include <string>
#include <map> #include <map>
#include <cassert> #include <cassert>
#include <limits> #include <limits>
@ -267,6 +266,84 @@ private:
} }
/**
* Chooses the initial centers in a way inspired by Gonzales (by Pierre-Emmanuel Viel):
* select the first point of the list as a candidate, then parse the points list. If another
* point is further than current candidate from the other centers, test if it is a good center
* of a local aggregation. If it is, replace current candidate by this point. And so on...
*
* Used with KMeansIndex that computes centers coordinates by averaging positions of clusters points,
* this doesn't make a real difference with previous methods. But used with HierarchicalClusteringIndex
* class that pick centers among existing points instead of computing the barycenters, there is a real
* improvement.
*
* Params:
* k = number of centers
* vecs = the dataset of points
* indices = indices in the dataset
* Returns:
*/
void GroupWiseCenterChooser(int k, int* dsindices, int indices_length, int* centers, int& centers_length)
{
const float kSpeedUpFactor = 1.3f;
int n = indices_length;
DistanceType* closestDistSq = new DistanceType[n];
// Choose one random center and set the closestDistSq values
int index = rand_int(n);
assert(index >=0 && index < n);
centers[0] = dsindices[index];
for (int i = 0; i < n; i++) {
closestDistSq[i] = distance(dataset[dsindices[i]], dataset[dsindices[index]], dataset.cols);
}
// Choose each center
int centerCount;
for (centerCount = 1; centerCount < k; centerCount++) {
// Repeat several trials
double bestNewPot = -1;
int bestNewIndex = 0;
DistanceType furthest = 0;
for (index = 0; index < n; index++) {
// We will test only the potential of the points further than current candidate
if( closestDistSq[index] > kSpeedUpFactor * (float)furthest ) {
// Compute the new potential
double newPot = 0;
for (int i = 0; i < n; i++) {
newPot += std::min( distance(dataset[dsindices[i]], dataset[dsindices[index]], dataset.cols)
, closestDistSq[i] );
}
// Store the best result
if ((bestNewPot < 0)||(newPot <= bestNewPot)) {
bestNewPot = newPot;
bestNewIndex = index;
furthest = closestDistSq[index];
}
}
}
// Add the appropriate center
centers[centerCount] = dsindices[bestNewIndex];
for (int i = 0; i < n; i++) {
closestDistSq[i] = std::min( distance(dataset[dsindices[i]], dataset[dsindices[bestNewIndex]], dataset.cols)
, closestDistSq[i] );
}
}
centers_length = centerCount;
delete[] closestDistSq;
}
public: public:
@ -300,6 +377,9 @@ public:
else if (centers_init_==FLANN_CENTERS_KMEANSPP) { else if (centers_init_==FLANN_CENTERS_KMEANSPP) {
chooseCenters = &HierarchicalClusteringIndex::chooseCentersKMeanspp; chooseCenters = &HierarchicalClusteringIndex::chooseCentersKMeanspp;
} }
else if (centers_init_==FLANN_CENTERS_GROUPWISE) {
chooseCenters = &HierarchicalClusteringIndex::GroupWiseCenterChooser;
}
else { else {
throw FLANNException("Unknown algorithm for choosing initial centers."); throw FLANNException("Unknown algorithm for choosing initial centers.");
} }
@ -355,7 +435,7 @@ public:
/** /**
* Returns size of index. * Returns size of index.
*/ */
size_t size() const size_t size() const CV_OVERRIDE
{ {
return size_; return size_;
} }
@ -363,7 +443,7 @@ public:
/** /**
* Returns the length of an index feature. * Returns the length of an index feature.
*/ */
size_t veclen() const size_t veclen() const CV_OVERRIDE
{ {
return veclen_; return veclen_;
} }
@ -373,7 +453,7 @@ public:
* Computes the inde memory usage * Computes the inde memory usage
* Returns: memory used by the index * Returns: memory used by the index
*/ */
int usedMemory() const int usedMemory() const CV_OVERRIDE
{ {
return pool.usedMemory+pool.wastedMemory+memoryCounter; return pool.usedMemory+pool.wastedMemory+memoryCounter;
} }
@ -381,7 +461,7 @@ public:
/** /**
* Builds the index * Builds the index
*/ */
void buildIndex() void buildIndex() CV_OVERRIDE
{ {
if (branching_<2) { if (branching_<2) {
throw FLANNException("Branching factor must be at least 2"); throw FLANNException("Branching factor must be at least 2");
@ -400,13 +480,13 @@ public:
} }
flann_algorithm_t getType() const flann_algorithm_t getType() const CV_OVERRIDE
{ {
return FLANN_INDEX_HIERARCHICAL; return FLANN_INDEX_HIERARCHICAL;
} }
void saveIndex(FILE* stream) void saveIndex(FILE* stream) CV_OVERRIDE
{ {
save_value(stream, branching_); save_value(stream, branching_);
save_value(stream, trees_); save_value(stream, trees_);
@ -421,7 +501,7 @@ public:
} }
void loadIndex(FILE* stream) void loadIndex(FILE* stream) CV_OVERRIDE
{ {
free_elements(); free_elements();
@ -464,7 +544,7 @@ public:
* vec = the vector for which to search the nearest neighbors * vec = the vector for which to search the nearest neighbors
* searchParams = parameters that influence the search algorithm (checks) * searchParams = parameters that influence the search algorithm (checks)
*/ */
void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) CV_OVERRIDE
{ {
int maxChecks = get_param(searchParams,"checks",32); int maxChecks = get_param(searchParams,"checks",32);
@ -489,7 +569,7 @@ public:
} }
IndexParams getParameters() const IndexParams getParameters() const CV_OVERRIDE
{ {
return params; return params;
} }

View File

@ -120,24 +120,29 @@ public:
/** /**
* Builds the index * Builds the index
*/ */
void buildIndex() void buildIndex() CV_OVERRIDE
{ {
/* Construct the randomized trees. */ /* Construct the randomized trees. */
for (int i = 0; i < trees_; i++) { for (int i = 0; i < trees_; i++) {
/* Randomize the order of vectors to allow for unbiased sampling. */ /* Randomize the order of vectors to allow for unbiased sampling. */
#ifndef OPENCV_FLANN_USE_STD_RAND
cv::randShuffle(vind_);
#else
std::random_shuffle(vind_.begin(), vind_.end()); std::random_shuffle(vind_.begin(), vind_.end());
#endif
tree_roots_[i] = divideTree(&vind_[0], int(size_) ); tree_roots_[i] = divideTree(&vind_[0], int(size_) );
} }
} }
flann_algorithm_t getType() const flann_algorithm_t getType() const CV_OVERRIDE
{ {
return FLANN_INDEX_KDTREE; return FLANN_INDEX_KDTREE;
} }
void saveIndex(FILE* stream) void saveIndex(FILE* stream) CV_OVERRIDE
{ {
save_value(stream, trees_); save_value(stream, trees_);
for (int i=0; i<trees_; ++i) { for (int i=0; i<trees_; ++i) {
@ -147,7 +152,7 @@ public:
void loadIndex(FILE* stream) void loadIndex(FILE* stream) CV_OVERRIDE
{ {
load_value(stream, trees_); load_value(stream, trees_);
if (tree_roots_!=NULL) { if (tree_roots_!=NULL) {
@ -165,7 +170,7 @@ public:
/** /**
* Returns size of index. * Returns size of index.
*/ */
size_t size() const size_t size() const CV_OVERRIDE
{ {
return size_; return size_;
} }
@ -173,7 +178,7 @@ public:
/** /**
* Returns the length of an index feature. * Returns the length of an index feature.
*/ */
size_t veclen() const size_t veclen() const CV_OVERRIDE
{ {
return veclen_; return veclen_;
} }
@ -182,7 +187,7 @@ public:
* Computes the inde memory usage * Computes the inde memory usage
* Returns: memory used by the index * Returns: memory used by the index
*/ */
int usedMemory() const int usedMemory() const CV_OVERRIDE
{ {
return int(pool_.usedMemory+pool_.wastedMemory+dataset_.rows*sizeof(int)); // pool memory and vind array memory return int(pool_.usedMemory+pool_.wastedMemory+dataset_.rows*sizeof(int)); // pool memory and vind array memory
} }
@ -196,7 +201,7 @@ public:
* vec = the vector for which to search the nearest neighbors * vec = the vector for which to search the nearest neighbors
* maxCheck = the maximum number of restarts (in a best-bin-first manner) * maxCheck = the maximum number of restarts (in a best-bin-first manner)
*/ */
void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) CV_OVERRIDE
{ {
int maxChecks = get_param(searchParams,"checks", 32); int maxChecks = get_param(searchParams,"checks", 32);
float epsError = 1+get_param(searchParams,"eps",0.0f); float epsError = 1+get_param(searchParams,"eps",0.0f);
@ -209,7 +214,7 @@ public:
} }
} }
IndexParams getParameters() const IndexParams getParameters() const CV_OVERRIDE
{ {
return index_params_; return index_params_;
} }

View File

@ -87,6 +87,7 @@ public:
{ {
size_ = dataset_.rows; size_ = dataset_.rows;
dim_ = dataset_.cols; dim_ = dataset_.cols;
root_node_ = 0;
int dim_param = get_param(params,"dim",-1); int dim_param = get_param(params,"dim",-1);
if (dim_param>0) dim_ = dim_param; if (dim_param>0) dim_ = dim_param;
leaf_max_size_ = get_param(params,"leaf_max_size",10); leaf_max_size_ = get_param(params,"leaf_max_size",10);
@ -113,7 +114,7 @@ public:
/** /**
* Builds the index * Builds the index
*/ */
void buildIndex() void buildIndex() CV_OVERRIDE
{ {
computeBoundingBox(root_bbox_); computeBoundingBox(root_bbox_);
root_node_ = divideTree(0, (int)size_, root_bbox_ ); // construct the tree root_node_ = divideTree(0, (int)size_, root_bbox_ ); // construct the tree
@ -132,13 +133,13 @@ public:
} }
} }
flann_algorithm_t getType() const flann_algorithm_t getType() const CV_OVERRIDE
{ {
return FLANN_INDEX_KDTREE_SINGLE; return FLANN_INDEX_KDTREE_SINGLE;
} }
void saveIndex(FILE* stream) void saveIndex(FILE* stream) CV_OVERRIDE
{ {
save_value(stream, size_); save_value(stream, size_);
save_value(stream, dim_); save_value(stream, dim_);
@ -153,7 +154,7 @@ public:
} }
void loadIndex(FILE* stream) void loadIndex(FILE* stream) CV_OVERRIDE
{ {
load_value(stream, size_); load_value(stream, size_);
load_value(stream, dim_); load_value(stream, dim_);
@ -178,7 +179,7 @@ public:
/** /**
* Returns size of index. * Returns size of index.
*/ */
size_t size() const size_t size() const CV_OVERRIDE
{ {
return size_; return size_;
} }
@ -186,7 +187,7 @@ public:
/** /**
* Returns the length of an index feature. * Returns the length of an index feature.
*/ */
size_t veclen() const size_t veclen() const CV_OVERRIDE
{ {
return dim_; return dim_;
} }
@ -195,7 +196,7 @@ public:
* Computes the inde memory usage * Computes the inde memory usage
* Returns: memory used by the index * Returns: memory used by the index
*/ */
int usedMemory() const int usedMemory() const CV_OVERRIDE
{ {
return (int)(pool_.usedMemory+pool_.wastedMemory+dataset_.rows*sizeof(int)); // pool memory and vind array memory return (int)(pool_.usedMemory+pool_.wastedMemory+dataset_.rows*sizeof(int)); // pool memory and vind array memory
} }
@ -209,7 +210,7 @@ public:
* \param[in] knn Number of nearest neighbors to return * \param[in] knn Number of nearest neighbors to return
* \param[in] params Search parameters * \param[in] params Search parameters
*/ */
void knnSearch(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, int knn, const SearchParams& params) void knnSearch(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, int knn, const SearchParams& params) CV_OVERRIDE
{ {
assert(queries.cols == veclen()); assert(queries.cols == veclen());
assert(indices.rows >= queries.rows); assert(indices.rows >= queries.rows);
@ -224,7 +225,7 @@ public:
} }
} }
IndexParams getParameters() const IndexParams getParameters() const CV_OVERRIDE
{ {
return index_params_; return index_params_;
} }
@ -238,7 +239,7 @@ public:
* vec = the vector for which to search the nearest neighbors * vec = the vector for which to search the nearest neighbors
* maxCheck = the maximum number of restarts (in a best-bin-first manner) * maxCheck = the maximum number of restarts (in a best-bin-first manner)
*/ */
void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) CV_OVERRIDE
{ {
float epsError = 1+get_param(searchParams,"eps",0.0f); float epsError = 1+get_param(searchParams,"eps",0.0f);

View File

@ -32,7 +32,6 @@
#define OPENCV_FLANN_KMEANS_INDEX_H_ #define OPENCV_FLANN_KMEANS_INDEX_H_
#include <algorithm> #include <algorithm>
#include <string>
#include <map> #include <map>
#include <cassert> #include <cassert>
#include <limits> #include <limits>
@ -267,11 +266,61 @@ public:
public: public:
flann_algorithm_t getType() const flann_algorithm_t getType() const CV_OVERRIDE
{ {
return FLANN_INDEX_KMEANS; return FLANN_INDEX_KMEANS;
} }
class KMeansDistanceComputer : public cv::ParallelLoopBody
{
public:
KMeansDistanceComputer(Distance _distance, const Matrix<ElementType>& _dataset,
const int _branching, const int* _indices, const Matrix<double>& _dcenters, const size_t _veclen,
std::vector<int> &_new_centroids, std::vector<DistanceType> &_sq_dists)
: distance(_distance)
, dataset(_dataset)
, branching(_branching)
, indices(_indices)
, dcenters(_dcenters)
, veclen(_veclen)
, new_centroids(_new_centroids)
, sq_dists(_sq_dists)
{
}
void operator()(const cv::Range& range) const CV_OVERRIDE
{
const int begin = range.start;
const int end = range.end;
for( int i = begin; i<end; ++i)
{
DistanceType sq_dist(distance(dataset[indices[i]], dcenters[0], veclen));
int new_centroid(0);
for (int j=1; j<branching; ++j) {
DistanceType new_sq_dist = distance(dataset[indices[i]], dcenters[j], veclen);
if (sq_dist>new_sq_dist) {
new_centroid = j;
sq_dist = new_sq_dist;
}
}
sq_dists[i] = sq_dist;
new_centroids[i] = new_centroid;
}
}
private:
Distance distance;
const Matrix<ElementType>& dataset;
const int branching;
const int* indices;
const Matrix<double>& dcenters;
const size_t veclen;
std::vector<int> &new_centroids;
std::vector<DistanceType> &sq_dists;
KMeansDistanceComputer& operator=( const KMeansDistanceComputer & ) { return *this; }
};
/** /**
* Index constructor * Index constructor
* *
@ -334,7 +383,7 @@ public:
/** /**
* Returns size of index. * Returns size of index.
*/ */
size_t size() const size_t size() const CV_OVERRIDE
{ {
return size_; return size_;
} }
@ -342,7 +391,7 @@ public:
/** /**
* Returns the length of an index feature. * Returns the length of an index feature.
*/ */
size_t veclen() const size_t veclen() const CV_OVERRIDE
{ {
return veclen_; return veclen_;
} }
@ -357,7 +406,7 @@ public:
* Computes the inde memory usage * Computes the inde memory usage
* Returns: memory used by the index * Returns: memory used by the index
*/ */
int usedMemory() const int usedMemory() const CV_OVERRIDE
{ {
return pool_.usedMemory+pool_.wastedMemory+memoryCounter_; return pool_.usedMemory+pool_.wastedMemory+memoryCounter_;
} }
@ -365,7 +414,7 @@ public:
/** /**
* Builds the index * Builds the index
*/ */
void buildIndex() void buildIndex() CV_OVERRIDE
{ {
if (branching_<2) { if (branching_<2) {
throw FLANNException("Branching factor must be at least 2"); throw FLANNException("Branching factor must be at least 2");
@ -377,12 +426,14 @@ public:
} }
root_ = pool_.allocate<KMeansNode>(); root_ = pool_.allocate<KMeansNode>();
std::memset(root_, 0, sizeof(KMeansNode));
computeNodeStatistics(root_, indices_, (int)size_); computeNodeStatistics(root_, indices_, (int)size_);
computeClustering(root_, indices_, (int)size_, branching_,0); computeClustering(root_, indices_, (int)size_, branching_,0);
} }
void saveIndex(FILE* stream) void saveIndex(FILE* stream) CV_OVERRIDE
{ {
save_value(stream, branching_); save_value(stream, branching_);
save_value(stream, iterations_); save_value(stream, iterations_);
@ -394,7 +445,7 @@ public:
} }
void loadIndex(FILE* stream) void loadIndex(FILE* stream) CV_OVERRIDE
{ {
load_value(stream, branching_); load_value(stream, branching_);
load_value(stream, iterations_); load_value(stream, iterations_);
@ -429,7 +480,7 @@ public:
* vec = the vector for which to search the nearest neighbors * vec = the vector for which to search the nearest neighbors
* searchParams = parameters that influence the search algorithm (checks, cb_index) * searchParams = parameters that influence the search algorithm (checks, cb_index)
*/ */
void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) CV_OVERRIDE
{ {
int maxChecks = get_param(searchParams,"checks",32); int maxChecks = get_param(searchParams,"checks",32);
@ -488,7 +539,7 @@ public:
return clusterCount; return clusterCount;
} }
IndexParams getParameters() const IndexParams getParameters() const CV_OVERRIDE
{ {
return index_params_; return index_params_;
} }
@ -659,7 +710,8 @@ private:
return; return;
} }
int* centers_idx = new int[branching]; cv::AutoBuffer<int> centers_idx_buf(branching);
int* centers_idx = centers_idx_buf.data();
int centers_length; int centers_length;
(this->*chooseCenters)(branching, indices, indices_length, centers_idx, centers_length); (this->*chooseCenters)(branching, indices, indices_length, centers_idx, centers_length);
@ -667,29 +719,30 @@ private:
node->indices = indices; node->indices = indices;
std::sort(node->indices,node->indices+indices_length); std::sort(node->indices,node->indices+indices_length);
node->childs = NULL; node->childs = NULL;
delete [] centers_idx;
return; return;
} }
Matrix<double> dcenters(new double[branching*veclen_],branching,veclen_); cv::AutoBuffer<double> dcenters_buf(branching*veclen_);
Matrix<double> dcenters(dcenters_buf.data(), branching, veclen_);
for (int i=0; i<centers_length; ++i) { for (int i=0; i<centers_length; ++i) {
ElementType* vec = dataset_[centers_idx[i]]; ElementType* vec = dataset_[centers_idx[i]];
for (size_t k=0; k<veclen_; ++k) { for (size_t k=0; k<veclen_; ++k) {
dcenters[i][k] = double(vec[k]); dcenters[i][k] = double(vec[k]);
} }
} }
delete[] centers_idx;
std::vector<DistanceType> radiuses(branching); std::vector<DistanceType> radiuses(branching);
int* count = new int[branching]; cv::AutoBuffer<int> count_buf(branching);
int* count = count_buf.data();
for (int i=0; i<branching; ++i) { for (int i=0; i<branching; ++i) {
radiuses[i] = 0; radiuses[i] = 0;
count[i] = 0; count[i] = 0;
} }
// assign points to clusters // assign points to clusters
int* belongs_to = new int[indices_length]; cv::AutoBuffer<int> belongs_to_buf(indices_length);
int* belongs_to = belongs_to_buf.data();
for (int i=0; i<indices_length; ++i) { for (int i=0; i<indices_length; ++i) {
DistanceType sq_dist = distance_(dataset_[indices[i]], dcenters[0], veclen_); DistanceType sq_dist = distance_(dataset_[indices[i]], dcenters[0], veclen_);
@ -732,25 +785,23 @@ private:
} }
} }
std::vector<int> new_centroids(indices_length);
std::vector<DistanceType> sq_dists(indices_length);
// reassign points to clusters // reassign points to clusters
for (int i=0; i<indices_length; ++i) { KMeansDistanceComputer invoker(distance_, dataset_, branching, indices, dcenters, veclen_, new_centroids, sq_dists);
DistanceType sq_dist = distance_(dataset_[indices[i]], dcenters[0], veclen_); parallel_for_(cv::Range(0, (int)indices_length), invoker);
int new_centroid = 0;
for (int j=1; j<branching; ++j) { for (int i=0; i < (int)indices_length; ++i) {
DistanceType new_sq_dist = distance_(dataset_[indices[i]], dcenters[j], veclen_); DistanceType sq_dist(sq_dists[i]);
if (sq_dist>new_sq_dist) { int new_centroid(new_centroids[i]);
new_centroid = j; if (sq_dist > radiuses[new_centroid]) {
sq_dist = new_sq_dist;
}
}
if (sq_dist>radiuses[new_centroid]) {
radiuses[new_centroid] = sq_dist; radiuses[new_centroid] = sq_dist;
} }
if (new_centroid != belongs_to[i]) { if (new_centroid != belongs_to[i]) {
count[belongs_to[i]]--; count[belongs_to[i]]--;
count[new_centroid]++; count[new_centroid]++;
belongs_to[i] = new_centroid; belongs_to[i] = new_centroid;
converged = false; converged = false;
} }
} }
@ -816,19 +867,16 @@ private:
variance -= distance_(centers[c], ZeroIterator<ElementType>(), veclen_); variance -= distance_(centers[c], ZeroIterator<ElementType>(), veclen_);
node->childs[c] = pool_.allocate<KMeansNode>(); node->childs[c] = pool_.allocate<KMeansNode>();
std::memset(node->childs[c], 0, sizeof(KMeansNode));
node->childs[c]->radius = radiuses[c]; node->childs[c]->radius = radiuses[c];
node->childs[c]->pivot = centers[c]; node->childs[c]->pivot = centers[c];
node->childs[c]->variance = variance; node->childs[c]->variance = variance;
node->childs[c]->mean_radius = mean_radius; node->childs[c]->mean_radius = mean_radius;
node->childs[c]->indices = NULL;
computeClustering(node->childs[c],indices+start, end-start, branching, level+1); computeClustering(node->childs[c],indices+start, end-start, branching, level+1);
start=end; start=end;
} }
delete[] dcenters.data;
delete[] centers; delete[] centers;
delete[] count;
delete[] belongs_to;
} }
@ -1006,7 +1054,7 @@ private:
/** /**
* Helper function the descends in the hierarchical k-means tree by spliting those clusters that minimize * Helper function the descends in the hierarchical k-means tree by splitting those clusters that minimize
* the overall variance of the clustering. * the overall variance of the clustering.
* Params: * Params:
* root = root node * root = root node

View File

@ -63,47 +63,47 @@ public:
LinearIndex(const LinearIndex&); LinearIndex(const LinearIndex&);
LinearIndex& operator=(const LinearIndex&); LinearIndex& operator=(const LinearIndex&);
flann_algorithm_t getType() const flann_algorithm_t getType() const CV_OVERRIDE
{ {
return FLANN_INDEX_LINEAR; return FLANN_INDEX_LINEAR;
} }
size_t size() const size_t size() const CV_OVERRIDE
{ {
return dataset_.rows; return dataset_.rows;
} }
size_t veclen() const size_t veclen() const CV_OVERRIDE
{ {
return dataset_.cols; return dataset_.cols;
} }
int usedMemory() const int usedMemory() const CV_OVERRIDE
{ {
return 0; return 0;
} }
void buildIndex() void buildIndex() CV_OVERRIDE
{ {
/* nothing to do here for linear search */ /* nothing to do here for linear search */
} }
void saveIndex(FILE*) void saveIndex(FILE*) CV_OVERRIDE
{ {
/* nothing to do here for linear search */ /* nothing to do here for linear search */
} }
void loadIndex(FILE*) void loadIndex(FILE*) CV_OVERRIDE
{ {
/* nothing to do here for linear search */ /* nothing to do here for linear search */
index_params_["algorithm"] = getType(); index_params_["algorithm"] = getType();
} }
void findNeighbors(ResultSet<DistanceType>& resultSet, const ElementType* vec, const SearchParams& /*searchParams*/) void findNeighbors(ResultSet<DistanceType>& resultSet, const ElementType* vec, const SearchParams& /*searchParams*/) CV_OVERRIDE
{ {
ElementType* data = dataset_.data; ElementType* data = dataset_.data;
for (size_t i = 0; i < dataset_.rows; ++i, data += dataset_.cols) { for (size_t i = 0; i < dataset_.rows; ++i, data += dataset_.cols) {
@ -112,7 +112,7 @@ public:
} }
} }
IndexParams getParameters() const IndexParams getParameters() const CV_OVERRIDE
{ {
return index_params_; return index_params_;
} }

View File

@ -63,7 +63,12 @@ class Logger
stream = stdout; stream = stdout;
} }
else { else {
#ifdef _MSC_VER
if (fopen_s(&stream, name, "w") != 0)
stream = NULL;
#else
stream = fopen(name,"w"); stream = fopen(name,"w");
#endif
if (stream == NULL) { if (stream == NULL) {
stream = stdout; stream = stdout;
} }

View File

@ -107,37 +107,25 @@ public:
/** /**
* Builds the index * Builds the index
*/ */
void buildIndex() void buildIndex() CV_OVERRIDE
{ {
std::vector<size_t> indices(feature_size_ * CHAR_BIT);
tables_.resize(table_number_); tables_.resize(table_number_);
for (unsigned int i = 0; i < table_number_; ++i) { for (unsigned int i = 0; i < table_number_; ++i) {
//re-initialize the random indices table that the LshTable will use to pick its sub-dimensions
if( (indices.size() == feature_size_ * CHAR_BIT) || (indices.size() < key_size_) )
{
indices.resize( feature_size_ * CHAR_BIT );
for (size_t j = 0; j < feature_size_ * CHAR_BIT; ++j)
indices[j] = j;
std::random_shuffle(indices.begin(), indices.end());
}
lsh::LshTable<ElementType>& table = tables_[i]; lsh::LshTable<ElementType>& table = tables_[i];
table = lsh::LshTable<ElementType>(feature_size_, key_size_, indices); table = lsh::LshTable<ElementType>(feature_size_, key_size_);
// Add the features to the table // Add the features to the table
table.add(dataset_); table.add(dataset_);
} }
} }
flann_algorithm_t getType() const flann_algorithm_t getType() const CV_OVERRIDE
{ {
return FLANN_INDEX_LSH; return FLANN_INDEX_LSH;
} }
void saveIndex(FILE* stream) void saveIndex(FILE* stream) CV_OVERRIDE
{ {
save_value(stream,table_number_); save_value(stream,table_number_);
save_value(stream,key_size_); save_value(stream,key_size_);
@ -145,7 +133,7 @@ public:
save_value(stream, dataset_); save_value(stream, dataset_);
} }
void loadIndex(FILE* stream) void loadIndex(FILE* stream) CV_OVERRIDE
{ {
load_value(stream, table_number_); load_value(stream, table_number_);
load_value(stream, key_size_); load_value(stream, key_size_);
@ -163,7 +151,7 @@ public:
/** /**
* Returns size of index. * Returns size of index.
*/ */
size_t size() const size_t size() const CV_OVERRIDE
{ {
return dataset_.rows; return dataset_.rows;
} }
@ -171,7 +159,7 @@ public:
/** /**
* Returns the length of an index feature. * Returns the length of an index feature.
*/ */
size_t veclen() const size_t veclen() const CV_OVERRIDE
{ {
return feature_size_; return feature_size_;
} }
@ -180,13 +168,13 @@ public:
* Computes the index memory usage * Computes the index memory usage
* Returns: memory used by the index * Returns: memory used by the index
*/ */
int usedMemory() const int usedMemory() const CV_OVERRIDE
{ {
return (int)(dataset_.rows * sizeof(int)); return (int)(dataset_.rows * sizeof(int));
} }
IndexParams getParameters() const IndexParams getParameters() const CV_OVERRIDE
{ {
return index_params_; return index_params_;
} }
@ -199,7 +187,7 @@ public:
* \param[in] knn Number of nearest neighbors to return * \param[in] knn Number of nearest neighbors to return
* \param[in] params Search parameters * \param[in] params Search parameters
*/ */
virtual void knnSearch(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, int knn, const SearchParams& params) virtual void knnSearch(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, int knn, const SearchParams& params) CV_OVERRIDE
{ {
assert(queries.cols == veclen()); assert(queries.cols == veclen());
assert(indices.rows >= queries.rows); assert(indices.rows >= queries.rows);
@ -229,7 +217,7 @@ public:
* vec = the vector for which to search the nearest neighbors * vec = the vector for which to search the nearest neighbors
* maxCheck = the maximum number of restarts (in a best-bin-first manner) * maxCheck = the maximum number of restarts (in a best-bin-first manner)
*/ */
void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& /*searchParams*/) void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& /*searchParams*/) CV_OVERRIDE
{ {
getNeighbors(vec, result); getNeighbors(vec, result);
} }

View File

@ -146,6 +146,9 @@ public:
*/ */
LshTable() LshTable()
{ {
key_size_ = 0;
feature_size_ = 0;
speed_level_ = kArray;
} }
/** Default constructor /** Default constructor
@ -153,8 +156,10 @@ public:
* @param feature_size is the size of the feature (considered as a ElementType[]) * @param feature_size is the size of the feature (considered as a ElementType[])
* @param key_size is the number of bits that are turned on in the feature * @param key_size is the number of bits that are turned on in the feature
*/ */
LshTable(unsigned int /*feature_size*/, unsigned int /*key_size*/, std::vector<size_t> & /*indices*/) LshTable(unsigned int feature_size, unsigned int key_size)
{ {
feature_size_ = feature_size;
CV_UNUSED(key_size);
std::cerr << "LSH is not implemented for that type" << std::endl; std::cerr << "LSH is not implemented for that type" << std::endl;
assert(0); assert(0);
} }
@ -263,12 +268,10 @@ private:
{ {
const size_t key_size_lower_bound = 1; const size_t key_size_lower_bound = 1;
//a value (size_t(1) << key_size) must fit the size_t type so key_size has to be strictly less than size of size_t //a value (size_t(1) << key_size) must fit the size_t type so key_size has to be strictly less than size of size_t
const size_t key_size_upper_bound = std::min(sizeof(BucketKey) * CHAR_BIT + 1, sizeof(size_t) * CHAR_BIT); const size_t key_size_upper_bound = (std::min)(sizeof(BucketKey) * CHAR_BIT + 1, sizeof(size_t) * CHAR_BIT);
if (key_size < key_size_lower_bound || key_size >= key_size_upper_bound) if (key_size < key_size_lower_bound || key_size >= key_size_upper_bound)
{ {
std::stringstream errorMessage; CV_Error(cv::Error::StsBadArg, cv::format("Invalid key_size (=%d). Valid values for your system are %d <= key_size < %d.", (int)key_size, (int)key_size_lower_bound, (int)key_size_upper_bound));
errorMessage << "Invalid key_size (=" << key_size << "). Valid values for your system are " << key_size_lower_bound << " <= key_size < " << key_size_upper_bound << ".";
CV_Error(CV_StsBadArg, errorMessage.str());
} }
speed_level_ = kHash; speed_level_ = kHash;
@ -330,6 +333,8 @@ private:
*/ */
unsigned int key_size_; unsigned int key_size_;
unsigned int feature_size_;
// Members only used for the unsigned char specialization // Members only used for the unsigned char specialization
/** The mask to apply to a feature to get the hash key /** The mask to apply to a feature to get the hash key
* Only used in the unsigned char case * Only used in the unsigned char case
@ -341,20 +346,25 @@ private:
// Specialization for unsigned char // Specialization for unsigned char
template<> template<>
inline LshTable<unsigned char>::LshTable( unsigned int feature_size, inline LshTable<unsigned char>::LshTable(unsigned int feature_size, unsigned int subsignature_size)
unsigned int subsignature_size,
std::vector<size_t> & indices )
{ {
feature_size_ = feature_size;
initialize(subsignature_size); initialize(subsignature_size);
// Allocate the mask // Allocate the mask
mask_ = std::vector<size_t>((size_t)ceil((float)(feature_size * sizeof(char)) / (float)sizeof(size_t)), 0); mask_ = std::vector<size_t>((feature_size * sizeof(char) + sizeof(size_t) - 1) / sizeof(size_t), 0);
// A bit brutal but fast to code
std::vector<int> indices(feature_size * CHAR_BIT);
for (size_t i = 0; i < feature_size * CHAR_BIT; ++i) indices[i] = (int)i;
#ifndef OPENCV_FLANN_USE_STD_RAND
cv::randShuffle(indices);
#else
std::random_shuffle(indices.begin(), indices.end());
#endif
// Generate a random set of order of subsignature_size_ bits // Generate a random set of order of subsignature_size_ bits
for (unsigned int i = 0; i < key_size_; ++i) { for (unsigned int i = 0; i < key_size_; ++i) {
//Ensure the Nth bit will be selected only once among the different LshTables size_t index = indices[i];
//to avoid having two different tables with signatures sharing many dimensions/many bits
size_t index = indices[0];
indices.erase( indices.begin() );
// Set that bit in the mask // Set that bit in the mask
size_t divisor = CHAR_BIT * sizeof(size_t); size_t divisor = CHAR_BIT * sizeof(size_t);
@ -386,6 +396,7 @@ inline size_t LshTable<unsigned char>::getKey(const unsigned char* feature) cons
{ {
// no need to check if T is dividable by sizeof(size_t) like in the Hamming // no need to check if T is dividable by sizeof(size_t) like in the Hamming
// distance computation as we have a mask // distance computation as we have a mask
// FIXIT: This is bad assumption, because we reading tail bytes after of the allocated features buffer
const size_t* feature_block_ptr = reinterpret_cast<const size_t*> ((const void*)feature); const size_t* feature_block_ptr = reinterpret_cast<const size_t*> ((const void*)feature);
// Figure out the subsignature of the feature // Figure out the subsignature of the feature
@ -394,10 +405,20 @@ inline size_t LshTable<unsigned char>::getKey(const unsigned char* feature) cons
size_t subsignature = 0; size_t subsignature = 0;
size_t bit_index = 1; size_t bit_index = 1;
for (std::vector<size_t>::const_iterator pmask_block = mask_.begin(); pmask_block != mask_.end(); ++pmask_block) { for (unsigned i = 0; i < feature_size_; i += sizeof(size_t)) {
// get the mask and signature blocks // get the mask and signature blocks
size_t feature_block = *feature_block_ptr; size_t feature_block;
size_t mask_block = *pmask_block; if (i <= feature_size_ - sizeof(size_t))
{
feature_block = *feature_block_ptr;
}
else
{
size_t tmp = 0;
memcpy(&tmp, feature_block_ptr, feature_size_ - i); // preserve bytes order
feature_block = tmp;
}
size_t mask_block = mask_[i / sizeof(size_t)];
while (mask_block) { while (mask_block) {
// Get the lowest set bit in the mask block // Get the lowest set bit in the mask block
size_t lowest_bit = mask_block & (-(ptrdiff_t)mask_block); size_t lowest_bit = mask_block & (-(ptrdiff_t)mask_block);

View File

@ -66,7 +66,7 @@ public:
/** /**
* Convenience function for deallocating the storage data. * Convenience function for deallocating the storage data.
*/ */
FLANN_DEPRECATED void free() CV_DEPRECATED void free()
{ {
fprintf(stderr, "The cvflann::Matrix<T>::free() method is deprecated " fprintf(stderr, "The cvflann::Matrix<T>::free() method is deprecated "
"and it does not do any memory deallocation any more. You are" "and it does not do any memory deallocation any more. You are"

View File

@ -40,12 +40,10 @@
// //
//M*/ //M*/
#ifndef _OPENCV_MINIFLANN_HPP_ #ifndef OPENCV_MINIFLANN_HPP
#define _OPENCV_MINIFLANN_HPP_ #define OPENCV_MINIFLANN_HPP
#ifdef __cplusplus #include "opencv2/core.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/flann/defines.h" #include "opencv2/flann/defines.h"
namespace cv namespace cv
@ -59,23 +57,27 @@ struct CV_EXPORTS IndexParams
IndexParams(); IndexParams();
~IndexParams(); ~IndexParams();
std::string getString(const std::string& key, const std::string& defaultVal=std::string()) const; String getString(const String& key, const String& defaultVal=String()) const;
int getInt(const std::string& key, int defaultVal=-1) const; int getInt(const String& key, int defaultVal=-1) const;
double getDouble(const std::string& key, double defaultVal=-1) const; double getDouble(const String& key, double defaultVal=-1) const;
void setString(const std::string& key, const std::string& value); void setString(const String& key, const String& value);
void setInt(const std::string& key, int value); void setInt(const String& key, int value);
void setDouble(const std::string& key, double value); void setDouble(const String& key, double value);
void setFloat(const std::string& key, float value); void setFloat(const String& key, float value);
void setBool(const std::string& key, bool value); void setBool(const String& key, bool value);
void setAlgorithm(int value); void setAlgorithm(int value);
void getAll(std::vector<std::string>& names, void getAll(std::vector<String>& names,
std::vector<int>& types, std::vector<int>& types,
std::vector<std::string>& strValues, std::vector<String>& strValues,
std::vector<double>& numValues) const; std::vector<double>& numValues) const;
void* params; void* params;
private:
IndexParams(const IndexParams &); // copy disabled
IndexParams& operator=(const IndexParams &); // assign disabled
}; };
struct CV_EXPORTS KDTreeIndexParams : public IndexParams struct CV_EXPORTS KDTreeIndexParams : public IndexParams
@ -91,13 +93,13 @@ struct CV_EXPORTS LinearIndexParams : public IndexParams
struct CV_EXPORTS CompositeIndexParams : public IndexParams struct CV_EXPORTS CompositeIndexParams : public IndexParams
{ {
CompositeIndexParams(int trees = 4, int branching = 32, int iterations = 11, CompositeIndexParams(int trees = 4, int branching = 32, int iterations = 11,
cvflann::flann_centers_init_t centers_init = cvflann::FLANN_CENTERS_RANDOM, float cb_index = 0.2 ); cvflann::flann_centers_init_t centers_init = cvflann::FLANN_CENTERS_RANDOM, float cb_index = 0.2f );
}; };
struct CV_EXPORTS AutotunedIndexParams : public IndexParams struct CV_EXPORTS AutotunedIndexParams : public IndexParams
{ {
AutotunedIndexParams(float target_precision = 0.8, float build_weight = 0.01, AutotunedIndexParams(float target_precision = 0.8f, float build_weight = 0.01f,
float memory_weight = 0, float sample_fraction = 0.1); float memory_weight = 0, float sample_fraction = 0.1f);
}; };
struct CV_EXPORTS HierarchicalClusteringIndexParams : public IndexParams struct CV_EXPORTS HierarchicalClusteringIndexParams : public IndexParams
@ -109,7 +111,7 @@ struct CV_EXPORTS HierarchicalClusteringIndexParams : public IndexParams
struct CV_EXPORTS KMeansIndexParams : public IndexParams struct CV_EXPORTS KMeansIndexParams : public IndexParams
{ {
KMeansIndexParams(int branching = 32, int iterations = 11, KMeansIndexParams(int branching = 32, int iterations = 11,
cvflann::flann_centers_init_t centers_init = cvflann::FLANN_CENTERS_RANDOM, float cb_index = 0.2 ); cvflann::flann_centers_init_t centers_init = cvflann::FLANN_CENTERS_RANDOM, float cb_index = 0.2f );
}; };
struct CV_EXPORTS LshIndexParams : public IndexParams struct CV_EXPORTS LshIndexParams : public IndexParams
@ -119,7 +121,7 @@ struct CV_EXPORTS LshIndexParams : public IndexParams
struct CV_EXPORTS SavedIndexParams : public IndexParams struct CV_EXPORTS SavedIndexParams : public IndexParams
{ {
SavedIndexParams(const std::string& filename); SavedIndexParams(const String& filename);
}; };
struct CV_EXPORTS SearchParams : public IndexParams struct CV_EXPORTS SearchParams : public IndexParams
@ -142,8 +144,8 @@ public:
OutputArray dists, double radius, int maxResults, OutputArray dists, double radius, int maxResults,
const SearchParams& params=SearchParams()); const SearchParams& params=SearchParams());
CV_WRAP virtual void save(const std::string& filename) const; CV_WRAP virtual void save(const String& filename) const;
CV_WRAP virtual bool load(InputArray features, const std::string& filename); CV_WRAP virtual bool load(InputArray features, const String& filename);
CV_WRAP virtual void release(); CV_WRAP virtual void release();
CV_WRAP cvflann::flann_distance_t getDistance() const; CV_WRAP cvflann::flann_distance_t getDistance() const;
CV_WRAP cvflann::flann_algorithm_t getAlgorithm() const; CV_WRAP cvflann::flann_algorithm_t getAlgorithm() const;
@ -157,6 +159,4 @@ protected:
} } // namespace cv::flann } } // namespace cv::flann
#endif // __cplusplus
#endif #endif

View File

@ -31,8 +31,6 @@
#ifndef OPENCV_FLANN_NNINDEX_H #ifndef OPENCV_FLANN_NNINDEX_H
#define OPENCV_FLANN_NNINDEX_H #define OPENCV_FLANN_NNINDEX_H
#include <string>
#include "general.h" #include "general.h"
#include "matrix.h" #include "matrix.h"
#include "result_set.h" #include "result_set.h"

View File

@ -39,7 +39,7 @@
namespace cvflann namespace cvflann
{ {
typedef std::map<std::string, any> IndexParams; typedef std::map<cv::String, any> IndexParams;
struct SearchParams : public IndexParams struct SearchParams : public IndexParams
{ {
@ -56,7 +56,7 @@ struct SearchParams : public IndexParams
template<typename T> template<typename T>
T get_param(const IndexParams& params, std::string name, const T& default_value) T get_param(const IndexParams& params, cv::String name, const T& default_value)
{ {
IndexParams::const_iterator it = params.find(name); IndexParams::const_iterator it = params.find(name);
if (it != params.end()) { if (it != params.end()) {
@ -68,14 +68,14 @@ T get_param(const IndexParams& params, std::string name, const T& default_value)
} }
template<typename T> template<typename T>
T get_param(const IndexParams& params, std::string name) T get_param(const IndexParams& params, cv::String name)
{ {
IndexParams::const_iterator it = params.find(name); IndexParams::const_iterator it = params.find(name);
if (it != params.end()) { if (it != params.end()) {
return it->second.cast<T>(); return it->second.cast<T>();
} }
else { else {
throw FLANNException(std::string("Missing parameter '")+name+std::string("' in the parameters given")); throw FLANNException(cv::String("Missing parameter '")+name+cv::String("' in the parameters given"));
} }
} }

View File

@ -40,13 +40,31 @@
namespace cvflann namespace cvflann
{ {
inline int rand()
{
#ifndef OPENCV_FLANN_USE_STD_RAND
# if INT_MAX == RAND_MAX
int v = cv::theRNG().next() & INT_MAX;
# else
int v = cv::theRNG().uniform(0, RAND_MAX + 1);
# endif
#else
int v = std::rand();
#endif // OPENCV_FLANN_USE_STD_RAND
return v;
}
/** /**
* Seeds the random number generator * Seeds the random number generator
* @param seed Random seed * @param seed Random seed
*/ */
inline void seed_random(unsigned int seed) inline void seed_random(unsigned int seed)
{ {
srand(seed); #ifndef OPENCV_FLANN_USE_STD_RAND
cv::theRNG() = cv::RNG(seed);
#else
std::srand(seed);
#endif
} }
/* /*
@ -60,7 +78,7 @@ inline void seed_random(unsigned int seed)
*/ */
inline double rand_double(double high = 1.0, double low = 0) inline double rand_double(double high = 1.0, double low = 0)
{ {
return low + ((high-low) * (std::rand() / (RAND_MAX + 1.0))); return low + ((high-low) * (rand() / (RAND_MAX + 1.0)));
} }
/** /**
@ -71,7 +89,7 @@ inline double rand_double(double high = 1.0, double low = 0)
*/ */
inline int rand_int(int high = RAND_MAX, int low = 0) inline int rand_int(int high = RAND_MAX, int low = 0)
{ {
return low + (int) ( double(high-low) * (std::rand() / (RAND_MAX + 1.0))); return low + (int) ( double(high-low) * (rand() / (RAND_MAX + 1.0)));
} }
/** /**
@ -107,7 +125,11 @@ public:
for (int i = 0; i < size_; ++i) vals_[i] = i; for (int i = 0; i < size_; ++i) vals_[i] = i;
// shuffle the elements in the array // shuffle the elements in the array
#ifndef OPENCV_FLANN_USE_STD_RAND
cv::randShuffle(vals_);
#else
std::random_shuffle(vals_.begin(), vals_.end()); std::random_shuffle(vals_.begin(), vals_.end());
#endif
counter_ = 0; counter_ = 0;
} }

View File

@ -109,13 +109,13 @@ public:
return count; return count;
} }
bool full() const bool full() const CV_OVERRIDE
{ {
return count == capacity; return count == capacity;
} }
void addPoint(DistanceType dist, int index) void addPoint(DistanceType dist, int index) CV_OVERRIDE
{ {
if (dist >= worst_distance_) return; if (dist >= worst_distance_) return;
int i; int i;
@ -139,7 +139,7 @@ public:
worst_distance_ = dists[capacity-1]; worst_distance_ = dists[capacity-1];
} }
DistanceType worstDist() const DistanceType worstDist() const CV_OVERRIDE
{ {
return worst_distance_; return worst_distance_;
} }
@ -176,13 +176,13 @@ public:
return count; return count;
} }
bool full() const bool full() const CV_OVERRIDE
{ {
return count == capacity; return count == capacity;
} }
void addPoint(DistanceType dist, int index) void addPoint(DistanceType dist, int index) CV_OVERRIDE
{ {
if (dist >= worst_distance_) return; if (dist >= worst_distance_) return;
int i; int i;
@ -215,7 +215,7 @@ public:
worst_distance_ = dists[capacity-1]; worst_distance_ = dists[capacity-1];
} }
DistanceType worstDist() const DistanceType worstDist() const CV_OVERRIDE
{ {
return worst_distance_; return worst_distance_;
} }
@ -303,14 +303,14 @@ public:
/** Default cosntructor */ /** Default cosntructor */
UniqueResultSet() : UniqueResultSet() :
worst_distance_(std::numeric_limits<DistanceType>::max()) is_full_(false), worst_distance_(std::numeric_limits<DistanceType>::max())
{ {
} }
/** Check the status of the set /** Check the status of the set
* @return true if we have k NN * @return true if we have k NN
*/ */
inline bool full() const inline bool full() const CV_OVERRIDE
{ {
return is_full_; return is_full_;
} }
@ -365,7 +365,7 @@ public:
* If we don't have enough neighbors, it returns the max possible value * If we don't have enough neighbors, it returns the max possible value
* @return * @return
*/ */
inline DistanceType worstDist() const inline DistanceType worstDist() const CV_OVERRIDE
{ {
return worst_distance_; return worst_distance_;
} }
@ -402,7 +402,7 @@ public:
* @param dist distance for that neighbor * @param dist distance for that neighbor
* @param index index of that neighbor * @param index index of that neighbor
*/ */
inline void addPoint(DistanceType dist, int index) inline void addPoint(DistanceType dist, int index) CV_OVERRIDE
{ {
// Don't do anything if we are worse than the worst // Don't do anything if we are worse than the worst
if (dist >= worst_distance_) return; if (dist >= worst_distance_) return;
@ -422,7 +422,7 @@ public:
/** Remove all elements in the set /** Remove all elements in the set
*/ */
void clear() void clear() CV_OVERRIDE
{ {
dist_indices_.clear(); dist_indices_.clear();
worst_distance_ = std::numeric_limits<DistanceType>::max(); worst_distance_ = std::numeric_limits<DistanceType>::max();
@ -449,7 +449,7 @@ class RadiusUniqueResultSet : public UniqueResultSet<DistanceType>
{ {
public: public:
/** Constructor /** Constructor
* @param capacity the number of neighbors to store at max * @param radius the maximum distance of a neighbor
*/ */
RadiusUniqueResultSet(DistanceType radius) : RadiusUniqueResultSet(DistanceType radius) :
radius_(radius) radius_(radius)
@ -461,14 +461,14 @@ public:
* @param dist distance for that neighbor * @param dist distance for that neighbor
* @param index index of that neighbor * @param index index of that neighbor
*/ */
void addPoint(DistanceType dist, int index) void addPoint(DistanceType dist, int index) CV_OVERRIDE
{ {
if (dist <= radius_) dist_indices_.insert(DistIndex(dist, index)); if (dist <= radius_) dist_indices_.insert(DistIndex(dist, index));
} }
/** Remove all elements in the set /** Remove all elements in the set
*/ */
inline void clear() inline void clear() CV_OVERRIDE
{ {
dist_indices_.clear(); dist_indices_.clear();
} }
@ -477,7 +477,7 @@ public:
/** Check the status of the set /** Check the status of the set
* @return alwys false * @return alwys false
*/ */
inline bool full() const inline bool full() const CV_OVERRIDE
{ {
return true; return true;
} }
@ -486,7 +486,7 @@ public:
* If we don't have enough neighbors, it returns the max possible value * If we don't have enough neighbors, it returns the max possible value
* @return * @return
*/ */
inline DistanceType worstDist() const inline DistanceType worstDist() const CV_OVERRIDE
{ {
return radius_; return radius_;
} }
@ -509,6 +509,7 @@ class KNNRadiusUniqueResultSet : public KNNUniqueResultSet<DistanceType>
public: public:
/** Constructor /** Constructor
* @param capacity the number of neighbors to store at max * @param capacity the number of neighbors to store at max
* @param radius the maximum distance of a neighbor
*/ */
KNNRadiusUniqueResultSet(unsigned int capacity, DistanceType radius) KNNRadiusUniqueResultSet(unsigned int capacity, DistanceType radius)
{ {

View File

@ -32,7 +32,8 @@
#define OPENCV_FLANN_TIMER_H #define OPENCV_FLANN_TIMER_H
#include <time.h> #include <time.h>
#include "opencv2/core.hpp"
#include "opencv2/core/utility.hpp"
namespace cvflann namespace cvflann
{ {
@ -44,7 +45,7 @@ namespace cvflann
*/ */
class StartStopTimer class StartStopTimer
{ {
clock_t startTime; int64 startTime;
public: public:
/** /**
@ -66,7 +67,7 @@ public:
*/ */
void start() void start()
{ {
startTime = clock(); startTime = cv::getTickCount();
} }
/** /**
@ -74,8 +75,8 @@ public:
*/ */
void stop() void stop()
{ {
clock_t stopTime = clock(); int64 stopTime = cv::getTickCount();
value += ( (double)stopTime - startTime) / CLOCKS_PER_SEC; value += ( (double)stopTime - startTime) / cv::getTickFrequency();
} }
/** /**

View File

@ -12,6 +12,7 @@
// //
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// //
// Redistribution and use in source and binary forms, with or without modification, // Redistribution and use in source and binary forms, with or without modification,
@ -40,216 +41,8 @@
// //
//M*/ //M*/
#ifndef __OPENCV_HIGHGUI_HPP__ #ifdef __OPENCV_BUILD
#define __OPENCV_HIGHGUI_HPP__ #error this is a compatibility header which should not be used inside the OpenCV library
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui_c.h"
#ifdef __cplusplus
struct CvCapture;
struct CvVideoWriter;
namespace cv
{
enum {
// Flags for namedWindow
WINDOW_NORMAL = CV_WINDOW_NORMAL, // the user can resize the window (no constraint) / also use to switch a fullscreen window to a normal size
WINDOW_AUTOSIZE = CV_WINDOW_AUTOSIZE, // the user cannot resize the window, the size is constrainted by the image displayed
WINDOW_OPENGL = CV_WINDOW_OPENGL, // window with opengl support
// Flags for set / getWindowProperty
WND_PROP_FULLSCREEN = CV_WND_PROP_FULLSCREEN, // fullscreen property
WND_PROP_AUTOSIZE = CV_WND_PROP_AUTOSIZE, // autosize property
WND_PROP_ASPECT_RATIO = CV_WND_PROP_ASPECTRATIO, // window's aspect ration
WND_PROP_OPENGL = CV_WND_PROP_OPENGL // opengl support
};
CV_EXPORTS_W void namedWindow(const string& winname, int flags = WINDOW_AUTOSIZE);
CV_EXPORTS_W void destroyWindow(const string& winname);
CV_EXPORTS_W void destroyAllWindows();
CV_EXPORTS_W int startWindowThread();
CV_EXPORTS_W int waitKey(int delay = 0);
CV_EXPORTS_W void imshow(const string& winname, InputArray mat);
CV_EXPORTS_W void resizeWindow(const string& winname, int width, int height);
CV_EXPORTS_W void moveWindow(const string& winname, int x, int y);
CV_EXPORTS_W void setWindowProperty(const string& winname, int prop_id, double prop_value);//YV
CV_EXPORTS_W double getWindowProperty(const string& winname, int prop_id);//YV
enum
{
EVENT_MOUSEMOVE =0,
EVENT_LBUTTONDOWN =1,
EVENT_RBUTTONDOWN =2,
EVENT_MBUTTONDOWN =3,
EVENT_LBUTTONUP =4,
EVENT_RBUTTONUP =5,
EVENT_MBUTTONUP =6,
EVENT_LBUTTONDBLCLK =7,
EVENT_RBUTTONDBLCLK =8,
EVENT_MBUTTONDBLCLK =9
};
enum
{
EVENT_FLAG_LBUTTON =1,
EVENT_FLAG_RBUTTON =2,
EVENT_FLAG_MBUTTON =4,
EVENT_FLAG_CTRLKEY =8,
EVENT_FLAG_SHIFTKEY =16,
EVENT_FLAG_ALTKEY =32
};
typedef void (*MouseCallback)(int event, int x, int y, int flags, void* userdata);
//! assigns callback for mouse events
CV_EXPORTS void setMouseCallback(const string& winname, MouseCallback onMouse, void* userdata = 0);
typedef void (CV_CDECL *TrackbarCallback)(int pos, void* userdata);
CV_EXPORTS int createTrackbar(const string& trackbarname, const string& winname,
int* value, int count,
TrackbarCallback onChange = 0,
void* userdata = 0);
CV_EXPORTS_W int getTrackbarPos(const string& trackbarname, const string& winname);
CV_EXPORTS_W void setTrackbarPos(const string& trackbarname, const string& winname, int pos);
// OpenGL support
typedef void (*OpenGlDrawCallback)(void* userdata);
CV_EXPORTS void setOpenGlDrawCallback(const string& winname, OpenGlDrawCallback onOpenGlDraw, void* userdata = 0);
CV_EXPORTS void setOpenGlContext(const string& winname);
CV_EXPORTS void updateWindow(const string& winname);
// < Deperecated
CV_EXPORTS void pointCloudShow(const string& winname, const GlCamera& camera, const GlArrays& arr);
CV_EXPORTS void pointCloudShow(const string& winname, const GlCamera& camera, InputArray points, InputArray colors = noArray());
// >
//Only for Qt
CV_EXPORTS CvFont fontQt(const string& nameFont, int pointSize=-1,
Scalar color=Scalar::all(0), int weight=CV_FONT_NORMAL,
int style=CV_STYLE_NORMAL, int spacing=0);
CV_EXPORTS void addText( const Mat& img, const string& text, Point org, CvFont font);
CV_EXPORTS void displayOverlay(const string& winname, const string& text, int delayms CV_DEFAULT(0));
CV_EXPORTS void displayStatusBar(const string& winname, const string& text, int delayms CV_DEFAULT(0));
CV_EXPORTS void saveWindowParameters(const string& windowName);
CV_EXPORTS void loadWindowParameters(const string& windowName);
CV_EXPORTS int startLoop(int (*pt2Func)(int argc, char *argv[]), int argc, char* argv[]);
CV_EXPORTS void stopLoop();
typedef void (CV_CDECL *ButtonCallback)(int state, void* userdata);
CV_EXPORTS int createButton( const string& bar_name, ButtonCallback on_change,
void* userdata=NULL, int type=CV_PUSH_BUTTON,
bool initial_button_state=0);
//-------------------------
enum
{
// 8bit, color or not
IMREAD_UNCHANGED =-1,
// 8bit, gray
IMREAD_GRAYSCALE =0,
// ?, color
IMREAD_COLOR =1,
// any depth, ?
IMREAD_ANYDEPTH =2,
// ?, any color
IMREAD_ANYCOLOR =4
};
enum
{
IMWRITE_JPEG_QUALITY =1,
IMWRITE_PNG_COMPRESSION =16,
IMWRITE_PNG_STRATEGY =17,
IMWRITE_PNG_BILEVEL =18,
IMWRITE_PNG_STRATEGY_DEFAULT =0,
IMWRITE_PNG_STRATEGY_FILTERED =1,
IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY =2,
IMWRITE_PNG_STRATEGY_RLE =3,
IMWRITE_PNG_STRATEGY_FIXED =4,
IMWRITE_PXM_BINARY =32
};
CV_EXPORTS_W Mat imread( const string& filename, int flags=1 );
CV_EXPORTS_W bool imwrite( const string& filename, InputArray img,
const vector<int>& params=vector<int>());
CV_EXPORTS_W Mat imdecode( InputArray buf, int flags );
CV_EXPORTS Mat imdecode( InputArray buf, int flags, Mat* dst );
CV_EXPORTS_W bool imencode( const string& ext, InputArray img,
CV_OUT vector<uchar>& buf,
const vector<int>& params=vector<int>());
#ifndef CV_NO_VIDEO_CAPTURE_CPP_API
template<> void CV_EXPORTS Ptr<CvCapture>::delete_obj();
template<> void CV_EXPORTS Ptr<CvVideoWriter>::delete_obj();
class CV_EXPORTS_W VideoCapture
{
public:
CV_WRAP VideoCapture();
CV_WRAP VideoCapture(const string& filename);
CV_WRAP VideoCapture(int device);
virtual ~VideoCapture();
CV_WRAP virtual bool open(const string& filename);
CV_WRAP virtual bool open(int device);
CV_WRAP virtual bool isOpened() const;
CV_WRAP virtual void release();
CV_WRAP virtual bool grab();
CV_WRAP virtual bool retrieve(CV_OUT Mat& image, int channel=0);
virtual VideoCapture& operator >> (CV_OUT Mat& image);
CV_WRAP virtual bool read(CV_OUT Mat& image);
CV_WRAP virtual bool set(int propId, double value);
CV_WRAP virtual double get(int propId);
protected:
Ptr<CvCapture> cap;
};
class CV_EXPORTS_W VideoWriter
{
public:
CV_WRAP VideoWriter();
CV_WRAP VideoWriter(const string& filename, int fourcc, double fps,
Size frameSize, bool isColor=true);
virtual ~VideoWriter();
CV_WRAP virtual bool open(const string& filename, int fourcc, double fps,
Size frameSize, bool isColor=true);
CV_WRAP virtual bool isOpened() const;
CV_WRAP virtual void release();
virtual VideoWriter& operator << (const Mat& image);
CV_WRAP virtual void write(const Mat& image);
protected:
Ptr<CvVideoWriter> writer;
};
#endif #endif
} #include "opencv2/highgui.hpp"
#endif
#endif

View File

@ -39,15 +39,26 @@
// //
//M*/ //M*/
#ifndef __OPENCV_HIGHGUI_H__ #ifndef OPENCV_HIGHGUI_H
#define __OPENCV_HIGHGUI_H__ #define OPENCV_HIGHGUI_H
#include "opencv2/core/core_c.h" #include "opencv2/core/core_c.h"
#include "opencv2/imgproc/imgproc_c.h"
#ifdef HAVE_OPENCV_IMGCODECS
#include "opencv2/imgcodecs/imgcodecs_c.h"
#endif
#ifdef HAVE_OPENCV_VIDEOIO
#include "opencv2/videoio/videoio_c.h"
#endif
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif /* __cplusplus */ #endif /* __cplusplus */
/** @addtogroup highgui_c
@{
*/
/****************************************************************************************\ /****************************************************************************************\
* Basic GUI functions * * Basic GUI functions *
\****************************************************************************************/ \****************************************************************************************/
@ -67,7 +78,7 @@ enum { CV_STYLE_NORMAL = 0,//QFont::StyleNormal,
}; };
/* ---------*/ /* ---------*/
//for color cvScalar(blue_component, green_component, red\_component[, alpha_component]) //for color cvScalar(blue_component, green_component, red_component[, alpha_component])
//and alpha= 0 <-> 0xFF (not transparent <-> transparent) //and alpha= 0 <-> 0xFF (not transparent <-> transparent)
CVAPI(CvFont) cvFontQt(const char* nameFont, int pointSize CV_DEFAULT(-1), CvScalar color CV_DEFAULT(cvScalarAll(0)), int weight CV_DEFAULT(CV_FONT_NORMAL), int style CV_DEFAULT(CV_STYLE_NORMAL), int spacing CV_DEFAULT(0)); CVAPI(CvFont) cvFontQt(const char* nameFont, int pointSize CV_DEFAULT(-1), CvScalar color CV_DEFAULT(cvScalarAll(0)), int weight CV_DEFAULT(CV_FONT_NORMAL), int style CV_DEFAULT(CV_STYLE_NORMAL), int spacing CV_DEFAULT(0));
@ -100,6 +111,7 @@ enum
CV_WND_PROP_AUTOSIZE = 1, //to change/get window's autosize property CV_WND_PROP_AUTOSIZE = 1, //to change/get window's autosize property
CV_WND_PROP_ASPECTRATIO= 2, //to change/get window's aspectratio property CV_WND_PROP_ASPECTRATIO= 2, //to change/get window's aspectratio property
CV_WND_PROP_OPENGL = 3, //to change/get window's opengl support CV_WND_PROP_OPENGL = 3, //to change/get window's opengl support
CV_WND_PROP_VISIBLE = 4,
//These 2 flags are used by cvNamedWindow and cvSet/GetWindowProperty //These 2 flags are used by cvNamedWindow and cvSet/GetWindowProperty
CV_WINDOW_NORMAL = 0x00000000, //the user can resize the window (no constraint) / also use to switch a fullscreen window to a normal size CV_WINDOW_NORMAL = 0x00000000, //the user can resize the window (no constraint) / also use to switch a fullscreen window to a normal size
@ -123,6 +135,11 @@ CVAPI(int) cvNamedWindow( const char* name, int flags CV_DEFAULT(CV_WINDOW_AUTOS
CVAPI(void) cvSetWindowProperty(const char* name, int prop_id, double prop_value); CVAPI(void) cvSetWindowProperty(const char* name, int prop_id, double prop_value);
CVAPI(double) cvGetWindowProperty(const char* name, int prop_id); CVAPI(double) cvGetWindowProperty(const char* name, int prop_id);
#ifdef __cplusplus // FIXIT remove in OpenCV 4.0
/* Get window image rectangle coordinates, width and height */
CVAPI(cv::Rect)cvGetWindowImageRect(const char* name);
#endif
/* display image within window (highgui windows remember their content) */ /* display image within window (highgui windows remember their content) */
CVAPI(void) cvShowImage( const char* name, const CvArr* image ); CVAPI(void) cvShowImage( const char* name, const CvArr* image );
@ -158,6 +175,8 @@ CVAPI(int) cvCreateTrackbar2( const char* trackbar_name, const char* window_name
/* retrieve or set trackbar position */ /* retrieve or set trackbar position */
CVAPI(int) cvGetTrackbarPos( const char* trackbar_name, const char* window_name ); CVAPI(int) cvGetTrackbarPos( const char* trackbar_name, const char* window_name );
CVAPI(void) cvSetTrackbarPos( const char* trackbar_name, const char* window_name, int pos ); CVAPI(void) cvSetTrackbarPos( const char* trackbar_name, const char* window_name, int pos );
CVAPI(void) cvSetTrackbarMax(const char* trackbar_name, const char* window_name, int maxval);
CVAPI(void) cvSetTrackbarMin(const char* trackbar_name, const char* window_name, int minval);
enum enum
{ {
@ -170,7 +189,9 @@ enum
CV_EVENT_MBUTTONUP =6, CV_EVENT_MBUTTONUP =6,
CV_EVENT_LBUTTONDBLCLK =7, CV_EVENT_LBUTTONDBLCLK =7,
CV_EVENT_RBUTTONDBLCLK =8, CV_EVENT_RBUTTONDBLCLK =8,
CV_EVENT_MBUTTONDBLCLK =9 CV_EVENT_MBUTTONDBLCLK =9,
CV_EVENT_MOUSEWHEEL =10,
CV_EVENT_MOUSEHWHEEL =11
}; };
enum enum
@ -183,70 +204,15 @@ enum
CV_EVENT_FLAG_ALTKEY =32 CV_EVENT_FLAG_ALTKEY =32
}; };
#define CV_GET_WHEEL_DELTA(flags) ((short)((flags >> 16) & 0xffff)) // upper 16 bits
typedef void (CV_CDECL *CvMouseCallback )(int event, int x, int y, int flags, void* param); typedef void (CV_CDECL *CvMouseCallback )(int event, int x, int y, int flags, void* param);
/* assign callback for mouse events */ /* assign callback for mouse events */
CVAPI(void) cvSetMouseCallback( const char* window_name, CvMouseCallback on_mouse, CVAPI(void) cvSetMouseCallback( const char* window_name, CvMouseCallback on_mouse,
void* param CV_DEFAULT(NULL)); void* param CV_DEFAULT(NULL));
enum
{
/* 8bit, color or not */
CV_LOAD_IMAGE_UNCHANGED =-1,
/* 8bit, gray */
CV_LOAD_IMAGE_GRAYSCALE =0,
/* ?, color */
CV_LOAD_IMAGE_COLOR =1,
/* any depth, ? */
CV_LOAD_IMAGE_ANYDEPTH =2,
/* ?, any color */
CV_LOAD_IMAGE_ANYCOLOR =4
};
/* load image from file
iscolor can be a combination of above flags where CV_LOAD_IMAGE_UNCHANGED
overrides the other flags
using CV_LOAD_IMAGE_ANYCOLOR alone is equivalent to CV_LOAD_IMAGE_UNCHANGED
unless CV_LOAD_IMAGE_ANYDEPTH is specified images are converted to 8bit
*/
CVAPI(IplImage*) cvLoadImage( const char* filename, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR));
CVAPI(CvMat*) cvLoadImageM( const char* filename, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR));
enum
{
CV_IMWRITE_JPEG_QUALITY =1,
CV_IMWRITE_PNG_COMPRESSION =16,
CV_IMWRITE_PNG_STRATEGY =17,
CV_IMWRITE_PNG_BILEVEL =18,
CV_IMWRITE_PNG_STRATEGY_DEFAULT =0,
CV_IMWRITE_PNG_STRATEGY_FILTERED =1,
CV_IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY =2,
CV_IMWRITE_PNG_STRATEGY_RLE =3,
CV_IMWRITE_PNG_STRATEGY_FIXED =4,
CV_IMWRITE_PXM_BINARY =32
};
/* save image to file */
CVAPI(int) cvSaveImage( const char* filename, const CvArr* image,
const int* params CV_DEFAULT(0) );
/* decode image stored in the buffer */
CVAPI(IplImage*) cvDecodeImage( const CvMat* buf, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR));
CVAPI(CvMat*) cvDecodeImageM( const CvMat* buf, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR));
/* encode image and store the result as a byte vector (single-row 8uC1 matrix) */
CVAPI(CvMat*) cvEncodeImage( const char* ext, const CvArr* image,
const int* params CV_DEFAULT(0) );
enum
{
CV_CVTIMG_FLIP =1,
CV_CVTIMG_SWAP_RB =2
};
/* utility function: convert one image to another with optional vertical flip */
CVAPI(void) cvConvertImage( const CvArr* src, CvArr* dst, int flags CV_DEFAULT(0));
/* wait for key event infinitely (delay<=0) or for "delay" milliseconds */ /* wait for key event infinitely (delay<=0) or for "delay" milliseconds */
CVAPI(int) cvWaitKey(int delay CV_DEFAULT(0)); CVAPI(int) cvWaitKey(int delay CV_DEFAULT(0));
@ -260,363 +226,10 @@ CVAPI(void) cvUpdateWindow(const char* window_name);
/****************************************************************************************\ /****************************************************************************************\
* Working with Video Files and Cameras *
\****************************************************************************************/
/* "black box" capture structure */
typedef struct CvCapture CvCapture;
/* start capturing frames from video file */
CVAPI(CvCapture*) cvCreateFileCapture( const char* filename );
enum
{
CV_CAP_ANY =0, // autodetect
CV_CAP_MIL =100, // MIL proprietary drivers
CV_CAP_VFW =200, // platform native
CV_CAP_V4L =200,
CV_CAP_V4L2 =200,
CV_CAP_FIREWARE =300, // IEEE 1394 drivers
CV_CAP_FIREWIRE =300,
CV_CAP_IEEE1394 =300,
CV_CAP_DC1394 =300,
CV_CAP_CMU1394 =300,
CV_CAP_STEREO =400, // TYZX proprietary drivers
CV_CAP_TYZX =400,
CV_TYZX_LEFT =400,
CV_TYZX_RIGHT =401,
CV_TYZX_COLOR =402,
CV_TYZX_Z =403,
CV_CAP_QT =500, // QuickTime
CV_CAP_UNICAP =600, // Unicap drivers
CV_CAP_DSHOW =700, // DirectShow (via videoInput)
CV_CAP_MSMF =1400, // Microsoft Media Foundation (via videoInput)
CV_CAP_PVAPI =800, // PvAPI, Prosilica GigE SDK
CV_CAP_OPENNI =900, // OpenNI (for Kinect)
CV_CAP_OPENNI_ASUS =910, // OpenNI (for Asus Xtion)
CV_CAP_ANDROID =1000, // Android
CV_CAP_ANDROID_BACK =CV_CAP_ANDROID+99, // Android back camera
CV_CAP_ANDROID_FRONT =CV_CAP_ANDROID+98, // Android front camera
CV_CAP_XIAPI =1100, // XIMEA Camera API
CV_CAP_AVFOUNDATION = 1200, // AVFoundation framework for iOS (OS X Lion will have the same API)
CV_CAP_GIGANETIX = 1300, // Smartek Giganetix GigEVisionSDK
CV_CAP_INTELPERC = 1500 // Intel Perceptual Computing SDK
};
/* start capturing frames from camera: index = camera_index + domain_offset (CV_CAP_*) */
CVAPI(CvCapture*) cvCreateCameraCapture( int index );
/* grab a frame, return 1 on success, 0 on fail.
this function is thought to be fast */
CVAPI(int) cvGrabFrame( CvCapture* capture );
/* get the frame grabbed with cvGrabFrame(..)
This function may apply some frame processing like
frame decompression, flipping etc.
!!!DO NOT RELEASE or MODIFY the retrieved frame!!! */
CVAPI(IplImage*) cvRetrieveFrame( CvCapture* capture, int streamIdx CV_DEFAULT(0) );
/* Just a combination of cvGrabFrame and cvRetrieveFrame
!!!DO NOT RELEASE or MODIFY the retrieved frame!!! */
CVAPI(IplImage*) cvQueryFrame( CvCapture* capture );
/* stop capturing/reading and free resources */
CVAPI(void) cvReleaseCapture( CvCapture** capture );
enum
{
// modes of the controlling registers (can be: auto, manual, auto single push, absolute Latter allowed with any other mode)
// every feature can have only one mode turned on at a time
CV_CAP_PROP_DC1394_OFF = -4, //turn the feature off (not controlled manually nor automatically)
CV_CAP_PROP_DC1394_MODE_MANUAL = -3, //set automatically when a value of the feature is set by the user
CV_CAP_PROP_DC1394_MODE_AUTO = -2,
CV_CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO = -1,
CV_CAP_PROP_POS_MSEC =0,
CV_CAP_PROP_POS_FRAMES =1,
CV_CAP_PROP_POS_AVI_RATIO =2,
CV_CAP_PROP_FRAME_WIDTH =3,
CV_CAP_PROP_FRAME_HEIGHT =4,
CV_CAP_PROP_FPS =5,
CV_CAP_PROP_FOURCC =6,
CV_CAP_PROP_FRAME_COUNT =7,
CV_CAP_PROP_FORMAT =8,
CV_CAP_PROP_MODE =9,
CV_CAP_PROP_BRIGHTNESS =10,
CV_CAP_PROP_CONTRAST =11,
CV_CAP_PROP_SATURATION =12,
CV_CAP_PROP_HUE =13,
CV_CAP_PROP_GAIN =14,
CV_CAP_PROP_EXPOSURE =15,
CV_CAP_PROP_CONVERT_RGB =16,
CV_CAP_PROP_WHITE_BALANCE_BLUE_U =17,
CV_CAP_PROP_RECTIFICATION =18,
CV_CAP_PROP_MONOCROME =19,
CV_CAP_PROP_SHARPNESS =20,
CV_CAP_PROP_AUTO_EXPOSURE =21, // exposure control done by camera,
// user can adjust refernce level
// using this feature
CV_CAP_PROP_GAMMA =22,
CV_CAP_PROP_TEMPERATURE =23,
CV_CAP_PROP_TRIGGER =24,
CV_CAP_PROP_TRIGGER_DELAY =25,
CV_CAP_PROP_WHITE_BALANCE_RED_V =26,
CV_CAP_PROP_ZOOM =27,
CV_CAP_PROP_FOCUS =28,
CV_CAP_PROP_GUID =29,
CV_CAP_PROP_ISO_SPEED =30,
CV_CAP_PROP_MAX_DC1394 =31,
CV_CAP_PROP_BACKLIGHT =32,
CV_CAP_PROP_PAN =33,
CV_CAP_PROP_TILT =34,
CV_CAP_PROP_ROLL =35,
CV_CAP_PROP_IRIS =36,
CV_CAP_PROP_SETTINGS =37,
CV_CAP_PROP_AUTOGRAB =1024, // property for highgui class CvCapture_Android only
CV_CAP_PROP_SUPPORTED_PREVIEW_SIZES_STRING=1025, // readonly, tricky property, returns cpnst char* indeed
CV_CAP_PROP_PREVIEW_FORMAT=1026, // readonly, tricky property, returns cpnst char* indeed
// OpenNI map generators
CV_CAP_OPENNI_DEPTH_GENERATOR = 1 << 31,
CV_CAP_OPENNI_IMAGE_GENERATOR = 1 << 30,
CV_CAP_OPENNI_GENERATORS_MASK = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_OPENNI_IMAGE_GENERATOR,
// Properties of cameras available through OpenNI interfaces
CV_CAP_PROP_OPENNI_OUTPUT_MODE = 100,
CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH = 101, // in mm
CV_CAP_PROP_OPENNI_BASELINE = 102, // in mm
CV_CAP_PROP_OPENNI_FOCAL_LENGTH = 103, // in pixels
CV_CAP_PROP_OPENNI_REGISTRATION = 104, // flag
CV_CAP_PROP_OPENNI_REGISTRATION_ON = CV_CAP_PROP_OPENNI_REGISTRATION, // flag that synchronizes the remapping depth map to image map
// by changing depth generator's view point (if the flag is "on") or
// sets this view point to its normal one (if the flag is "off").
CV_CAP_PROP_OPENNI_APPROX_FRAME_SYNC = 105,
CV_CAP_PROP_OPENNI_MAX_BUFFER_SIZE = 106,
CV_CAP_PROP_OPENNI_CIRCLE_BUFFER = 107,
CV_CAP_PROP_OPENNI_MAX_TIME_DURATION = 108,
CV_CAP_PROP_OPENNI_GENERATOR_PRESENT = 109,
CV_CAP_OPENNI_IMAGE_GENERATOR_PRESENT = CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_OPENNI_GENERATOR_PRESENT,
CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE = CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_OPENNI_OUTPUT_MODE,
CV_CAP_OPENNI_DEPTH_GENERATOR_BASELINE = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_BASELINE,
CV_CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_FOCAL_LENGTH,
CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_REGISTRATION,
CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON = CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION,
// Properties of cameras available through GStreamer interface
CV_CAP_GSTREAMER_QUEUE_LENGTH = 200, // default is 1
CV_CAP_PROP_PVAPI_MULTICASTIP = 300, // ip for anable multicast master mode. 0 for disable multicast
// Properties of cameras available through XIMEA SDK interface
CV_CAP_PROP_XI_DOWNSAMPLING = 400, // Change image resolution by binning or skipping.
CV_CAP_PROP_XI_DATA_FORMAT = 401, // Output data format.
CV_CAP_PROP_XI_OFFSET_X = 402, // Horizontal offset from the origin to the area of interest (in pixels).
CV_CAP_PROP_XI_OFFSET_Y = 403, // Vertical offset from the origin to the area of interest (in pixels).
CV_CAP_PROP_XI_TRG_SOURCE = 404, // Defines source of trigger.
CV_CAP_PROP_XI_TRG_SOFTWARE = 405, // Generates an internal trigger. PRM_TRG_SOURCE must be set to TRG_SOFTWARE.
CV_CAP_PROP_XI_GPI_SELECTOR = 406, // Selects general purpose input
CV_CAP_PROP_XI_GPI_MODE = 407, // Set general purpose input mode
CV_CAP_PROP_XI_GPI_LEVEL = 408, // Get general purpose level
CV_CAP_PROP_XI_GPO_SELECTOR = 409, // Selects general purpose output
CV_CAP_PROP_XI_GPO_MODE = 410, // Set general purpose output mode
CV_CAP_PROP_XI_LED_SELECTOR = 411, // Selects camera signalling LED
CV_CAP_PROP_XI_LED_MODE = 412, // Define camera signalling LED functionality
CV_CAP_PROP_XI_MANUAL_WB = 413, // Calculates White Balance(must be called during acquisition)
CV_CAP_PROP_XI_AUTO_WB = 414, // Automatic white balance
CV_CAP_PROP_XI_AEAG = 415, // Automatic exposure/gain
CV_CAP_PROP_XI_EXP_PRIORITY = 416, // Exposure priority (0.5 - exposure 50%, gain 50%).
CV_CAP_PROP_XI_AE_MAX_LIMIT = 417, // Maximum limit of exposure in AEAG procedure
CV_CAP_PROP_XI_AG_MAX_LIMIT = 418, // Maximum limit of gain in AEAG procedure
CV_CAP_PROP_XI_AEAG_LEVEL = 419, // Average intensity of output signal AEAG should achieve(in %)
CV_CAP_PROP_XI_TIMEOUT = 420, // Image capture timeout in milliseconds
// Properties for Android cameras
CV_CAP_PROP_ANDROID_FLASH_MODE = 8001,
CV_CAP_PROP_ANDROID_FOCUS_MODE = 8002,
CV_CAP_PROP_ANDROID_WHITE_BALANCE = 8003,
CV_CAP_PROP_ANDROID_ANTIBANDING = 8004,
CV_CAP_PROP_ANDROID_FOCAL_LENGTH = 8005,
CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_NEAR = 8006,
CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_OPTIMAL = 8007,
CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_FAR = 8008,
CV_CAP_PROP_ANDROID_EXPOSE_LOCK = 8009,
CV_CAP_PROP_ANDROID_WHITEBALANCE_LOCK = 8010,
// Properties of cameras available through AVFOUNDATION interface
CV_CAP_PROP_IOS_DEVICE_FOCUS = 9001,
CV_CAP_PROP_IOS_DEVICE_EXPOSURE = 9002,
CV_CAP_PROP_IOS_DEVICE_FLASH = 9003,
CV_CAP_PROP_IOS_DEVICE_WHITEBALANCE = 9004,
CV_CAP_PROP_IOS_DEVICE_TORCH = 9005,
// Properties of cameras available through Smartek Giganetix Ethernet Vision interface
/* --- Vladimir Litvinenko (litvinenko.vladimir@gmail.com) --- */
CV_CAP_PROP_GIGA_FRAME_OFFSET_X = 10001,
CV_CAP_PROP_GIGA_FRAME_OFFSET_Y = 10002,
CV_CAP_PROP_GIGA_FRAME_WIDTH_MAX = 10003,
CV_CAP_PROP_GIGA_FRAME_HEIGH_MAX = 10004,
CV_CAP_PROP_GIGA_FRAME_SENS_WIDTH = 10005,
CV_CAP_PROP_GIGA_FRAME_SENS_HEIGH = 10006,
CV_CAP_PROP_INTELPERC_PROFILE_COUNT = 11001,
CV_CAP_PROP_INTELPERC_PROFILE_IDX = 11002,
CV_CAP_PROP_INTELPERC_DEPTH_LOW_CONFIDENCE_VALUE = 11003,
CV_CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE = 11004,
CV_CAP_PROP_INTELPERC_DEPTH_CONFIDENCE_THRESHOLD = 11005,
CV_CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ = 11006,
CV_CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT = 11007,
// Intel PerC streams
CV_CAP_INTELPERC_DEPTH_GENERATOR = 1 << 29,
CV_CAP_INTELPERC_IMAGE_GENERATOR = 1 << 28,
CV_CAP_INTELPERC_GENERATORS_MASK = CV_CAP_INTELPERC_DEPTH_GENERATOR + CV_CAP_INTELPERC_IMAGE_GENERATOR
};
enum
{
// Data given from depth generator.
CV_CAP_OPENNI_DEPTH_MAP = 0, // Depth values in mm (CV_16UC1)
CV_CAP_OPENNI_POINT_CLOUD_MAP = 1, // XYZ in meters (CV_32FC3)
CV_CAP_OPENNI_DISPARITY_MAP = 2, // Disparity in pixels (CV_8UC1)
CV_CAP_OPENNI_DISPARITY_MAP_32F = 3, // Disparity in pixels (CV_32FC1)
CV_CAP_OPENNI_VALID_DEPTH_MASK = 4, // CV_8UC1
// Data given from RGB image generator.
CV_CAP_OPENNI_BGR_IMAGE = 5,
CV_CAP_OPENNI_GRAY_IMAGE = 6
};
// Supported output modes of OpenNI image generator
enum
{
CV_CAP_OPENNI_VGA_30HZ = 0,
CV_CAP_OPENNI_SXGA_15HZ = 1,
CV_CAP_OPENNI_SXGA_30HZ = 2,
CV_CAP_OPENNI_QVGA_30HZ = 3,
CV_CAP_OPENNI_QVGA_60HZ = 4
};
//supported by Android camera output formats
enum
{
CV_CAP_ANDROID_COLOR_FRAME_BGR = 0, //BGR
CV_CAP_ANDROID_COLOR_FRAME = CV_CAP_ANDROID_COLOR_FRAME_BGR,
CV_CAP_ANDROID_GREY_FRAME = 1, //Y
CV_CAP_ANDROID_COLOR_FRAME_RGB = 2,
CV_CAP_ANDROID_COLOR_FRAME_BGRA = 3,
CV_CAP_ANDROID_COLOR_FRAME_RGBA = 4
};
// supported Android camera flash modes
enum
{
CV_CAP_ANDROID_FLASH_MODE_AUTO = 0,
CV_CAP_ANDROID_FLASH_MODE_OFF,
CV_CAP_ANDROID_FLASH_MODE_ON,
CV_CAP_ANDROID_FLASH_MODE_RED_EYE,
CV_CAP_ANDROID_FLASH_MODE_TORCH
};
// supported Android camera focus modes
enum
{
CV_CAP_ANDROID_FOCUS_MODE_AUTO = 0,
CV_CAP_ANDROID_FOCUS_MODE_CONTINUOUS_PICTURE,
CV_CAP_ANDROID_FOCUS_MODE_CONTINUOUS_VIDEO,
CV_CAP_ANDROID_FOCUS_MODE_EDOF,
CV_CAP_ANDROID_FOCUS_MODE_FIXED,
CV_CAP_ANDROID_FOCUS_MODE_INFINITY,
CV_CAP_ANDROID_FOCUS_MODE_MACRO
};
// supported Android camera white balance modes
enum
{
CV_CAP_ANDROID_WHITE_BALANCE_AUTO = 0,
CV_CAP_ANDROID_WHITE_BALANCE_CLOUDY_DAYLIGHT,
CV_CAP_ANDROID_WHITE_BALANCE_DAYLIGHT,
CV_CAP_ANDROID_WHITE_BALANCE_FLUORESCENT,
CV_CAP_ANDROID_WHITE_BALANCE_INCANDESCENT,
CV_CAP_ANDROID_WHITE_BALANCE_SHADE,
CV_CAP_ANDROID_WHITE_BALANCE_TWILIGHT,
CV_CAP_ANDROID_WHITE_BALANCE_WARM_FLUORESCENT
};
// supported Android camera antibanding modes
enum
{
CV_CAP_ANDROID_ANTIBANDING_50HZ = 0,
CV_CAP_ANDROID_ANTIBANDING_60HZ,
CV_CAP_ANDROID_ANTIBANDING_AUTO,
CV_CAP_ANDROID_ANTIBANDING_OFF
};
enum
{
CV_CAP_INTELPERC_DEPTH_MAP = 0, // Each pixel is a 16-bit integer. The value indicates the distance from an object to the camera's XY plane or the Cartesian depth.
CV_CAP_INTELPERC_UVDEPTH_MAP = 1, // Each pixel contains two 32-bit floating point values in the range of 0-1, representing the mapping of depth coordinates to the color coordinates.
CV_CAP_INTELPERC_IR_MAP = 2, // Each pixel is a 16-bit integer. The value indicates the intensity of the reflected laser beam.
CV_CAP_INTELPERC_IMAGE = 3
};
/* retrieve or set capture properties */
CVAPI(double) cvGetCaptureProperty( CvCapture* capture, int property_id );
CVAPI(int) cvSetCaptureProperty( CvCapture* capture, int property_id, double value );
// Return the type of the capturer (eg, CV_CAP_V4W, CV_CAP_UNICAP), which is unknown if created with CV_CAP_ANY
CVAPI(int) cvGetCaptureDomain( CvCapture* capture);
/* "black box" video file writer structure */
typedef struct CvVideoWriter CvVideoWriter;
#define CV_FOURCC_MACRO(c1, c2, c3, c4) (((c1) & 255) + (((c2) & 255) << 8) + (((c3) & 255) << 16) + (((c4) & 255) << 24))
CV_INLINE int CV_FOURCC(char c1, char c2, char c3, char c4)
{
return CV_FOURCC_MACRO(c1, c2, c3, c4);
}
#define CV_FOURCC_PROMPT -1 /* Open Codec Selection Dialog (Windows only) */
#define CV_FOURCC_DEFAULT CV_FOURCC('I', 'Y', 'U', 'V') /* Use default codec for specified filename (Linux only) */
/* initialize video file writer */
CVAPI(CvVideoWriter*) cvCreateVideoWriter( const char* filename, int fourcc,
double fps, CvSize frame_size,
int is_color CV_DEFAULT(1));
//CVAPI(CvVideoWriter*) cvCreateImageSequenceWriter( const char* filename,
// int is_color CV_DEFAULT(1));
/* write frame to video file */
CVAPI(int) cvWriteFrame( CvVideoWriter* writer, const IplImage* image );
/* close video file writer */
CVAPI(void) cvReleaseVideoWriter( CvVideoWriter** writer );
/****************************************************************************************\
* Obsolete functions/synonyms * * Obsolete functions/synonyms *
\****************************************************************************************/ \****************************************************************************************/
#define cvCaptureFromFile cvCreateFileCapture
#define cvCaptureFromCAM cvCreateCameraCapture
#define cvCaptureFromAVI cvCaptureFromFile
#define cvCreateAVIWriter cvCreateVideoWriter
#define cvWriteToAVI cvWriteFrame
#define cvAddSearchPath(path) #define cvAddSearchPath(path)
#define cvvInitSystem cvInitSystem #define cvvInitSystem cvInitSystem
#define cvvNamedWindow cvNamedWindow #define cvvNamedWindow cvNamedWindow
@ -624,17 +237,14 @@ CVAPI(void) cvReleaseVideoWriter( CvVideoWriter** writer );
#define cvvResizeWindow cvResizeWindow #define cvvResizeWindow cvResizeWindow
#define cvvDestroyWindow cvDestroyWindow #define cvvDestroyWindow cvDestroyWindow
#define cvvCreateTrackbar cvCreateTrackbar #define cvvCreateTrackbar cvCreateTrackbar
#define cvvLoadImage(name) cvLoadImage((name),1)
#define cvvSaveImage cvSaveImage
#define cvvAddSearchPath cvAddSearchPath #define cvvAddSearchPath cvAddSearchPath
#define cvvWaitKey(name) cvWaitKey(0) #define cvvWaitKey(name) cvWaitKey(0)
#define cvvWaitKeyEx(name,delay) cvWaitKey(delay) #define cvvWaitKeyEx(name,delay) cvWaitKey(delay)
#define cvvConvertImage cvConvertImage
#define HG_AUTOSIZE CV_WINDOW_AUTOSIZE #define HG_AUTOSIZE CV_WINDOW_AUTOSIZE
#define set_preprocess_func cvSetPreprocessFuncWin32 #define set_preprocess_func cvSetPreprocessFuncWin32
#define set_postprocess_func cvSetPostprocessFuncWin32 #define set_postprocess_func cvSetPostprocessFuncWin32
#if defined WIN32 || defined _WIN32 #if defined _WIN32
CVAPI(void) cvSetPreprocessFuncWin32_(const void* callback); CVAPI(void) cvSetPreprocessFuncWin32_(const void* callback);
CVAPI(void) cvSetPostprocessFuncWin32_(const void* callback); CVAPI(void) cvSetPostprocessFuncWin32_(const void* callback);
@ -643,6 +253,8 @@ CVAPI(void) cvSetPostprocessFuncWin32_(const void* callback);
#endif #endif
/** @} highgui_c */
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -40,8 +40,8 @@
// //
//M*/ //M*/
#ifndef __OPENCV_IMGPROC_TYPES_C_H__ #ifndef OPENCV_IMGPROC_TYPES_C_H
#define __OPENCV_IMGPROC_TYPES_C_H__ #define OPENCV_IMGPROC_TYPES_C_H
#include "opencv2/core/core_c.h" #include "opencv2/core/core_c.h"
@ -49,41 +49,55 @@
extern "C" { extern "C" {
#endif #endif
/* Connected component structure */ /** @addtogroup imgproc_c
@{
*/
/** Connected component structure */
typedef struct CvConnectedComp typedef struct CvConnectedComp
{ {
double area; /* area of the connected component */ double area; /**<area of the connected component */
CvScalar value; /* average color of the connected component */ CvScalar value; /**<average color of the connected component */
CvRect rect; /* ROI of the component */ CvRect rect; /**<ROI of the component */
CvSeq* contour; /* optional component boundary CvSeq* contour; /**<optional component boundary
(the contour might have child contours corresponding to the holes)*/ (the contour might have child contours corresponding to the holes)*/
} }
CvConnectedComp; CvConnectedComp;
/* Image smooth methods */ /** Image smooth methods */
enum enum SmoothMethod_c
{ {
/** linear convolution with \f$\texttt{size1}\times\texttt{size2}\f$ box kernel (all 1's). If
you want to smooth different pixels with different-size box kernels, you can use the integral
image that is computed using integral */
CV_BLUR_NO_SCALE =0, CV_BLUR_NO_SCALE =0,
/** linear convolution with \f$\texttt{size1}\times\texttt{size2}\f$ box kernel (all
1's) with subsequent scaling by \f$1/(\texttt{size1}\cdot\texttt{size2})\f$ */
CV_BLUR =1, CV_BLUR =1,
/** linear convolution with a \f$\texttt{size1}\times\texttt{size2}\f$ Gaussian kernel */
CV_GAUSSIAN =2, CV_GAUSSIAN =2,
/** median filter with a \f$\texttt{size1}\times\texttt{size1}\f$ square aperture */
CV_MEDIAN =3, CV_MEDIAN =3,
/** bilateral filter with a \f$\texttt{size1}\times\texttt{size1}\f$ square aperture, color
sigma= sigma1 and spatial sigma= sigma2. If size1=0, the aperture square side is set to
cvRound(sigma2\*1.5)\*2+1. See cv::bilateralFilter */
CV_BILATERAL =4 CV_BILATERAL =4
}; };
/* Filters used in pyramid decomposition */ /** Filters used in pyramid decomposition */
enum enum
{ {
CV_GAUSSIAN_5x5 = 7 CV_GAUSSIAN_5x5 = 7
}; };
/* Special filters */ /** Special filters */
enum enum
{ {
CV_SCHARR =-1, CV_SCHARR =-1,
CV_MAX_SOBEL_KSIZE =7 CV_MAX_SOBEL_KSIZE =7
}; };
/* Constants for color conversion */ /** Constants for color conversion */
enum enum
{ {
CV_BGR2BGRA =0, CV_BGR2BGRA =0,
@ -324,11 +338,32 @@ enum
CV_RGBA2YUV_YV12 = 133, CV_RGBA2YUV_YV12 = 133,
CV_BGRA2YUV_YV12 = 134, CV_BGRA2YUV_YV12 = 134,
CV_COLORCVT_MAX = 135 // Edge-Aware Demosaicing
CV_BayerBG2BGR_EA = 135,
CV_BayerGB2BGR_EA = 136,
CV_BayerRG2BGR_EA = 137,
CV_BayerGR2BGR_EA = 138,
CV_BayerBG2RGB_EA = CV_BayerRG2BGR_EA,
CV_BayerGB2RGB_EA = CV_BayerGR2BGR_EA,
CV_BayerRG2RGB_EA = CV_BayerBG2BGR_EA,
CV_BayerGR2RGB_EA = CV_BayerGB2BGR_EA,
CV_BayerBG2BGRA =139,
CV_BayerGB2BGRA =140,
CV_BayerRG2BGRA =141,
CV_BayerGR2BGRA =142,
CV_BayerBG2RGBA =CV_BayerRG2BGRA,
CV_BayerGB2RGBA =CV_BayerGR2BGRA,
CV_BayerRG2RGBA =CV_BayerBG2BGRA,
CV_BayerGR2RGBA =CV_BayerGB2BGRA,
CV_COLORCVT_MAX = 143
}; };
/* Sub-pixel interpolation methods */ /** Sub-pixel interpolation methods */
enum enum
{ {
CV_INTER_NN =0, CV_INTER_NN =0,
@ -338,23 +373,25 @@ enum
CV_INTER_LANCZOS4 =4 CV_INTER_LANCZOS4 =4
}; };
/* ... and other image warping flags */ /** ... and other image warping flags */
enum enum
{ {
CV_WARP_FILL_OUTLIERS =8, CV_WARP_FILL_OUTLIERS =8,
CV_WARP_INVERSE_MAP =16 CV_WARP_INVERSE_MAP =16
}; };
/* Shapes of a structuring element for morphological operations */ /** Shapes of a structuring element for morphological operations
enum @see cv::MorphShapes, cv::getStructuringElement
*/
enum MorphShapes_c
{ {
CV_SHAPE_RECT =0, CV_SHAPE_RECT =0,
CV_SHAPE_CROSS =1, CV_SHAPE_CROSS =1,
CV_SHAPE_ELLIPSE =2, CV_SHAPE_ELLIPSE =2,
CV_SHAPE_CUSTOM =100 CV_SHAPE_CUSTOM =100 //!< custom structuring element
}; };
/* Morphological operations */ /** Morphological operations */
enum enum
{ {
CV_MOP_ERODE =0, CV_MOP_ERODE =0,
@ -366,23 +403,71 @@ enum
CV_MOP_BLACKHAT =6 CV_MOP_BLACKHAT =6
}; };
/* Spatial and central moments */ /** Spatial and central moments */
typedef struct CvMoments typedef struct CvMoments
{ {
double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03; /* spatial moments */ double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03; /**< spatial moments */
double mu20, mu11, mu02, mu30, mu21, mu12, mu03; /* central moments */ double mu20, mu11, mu02, mu30, mu21, mu12, mu03; /**< central moments */
double inv_sqrt_m00; /* m00 != 0 ? 1/sqrt(m00) : 0 */ double inv_sqrt_m00; /**< m00 != 0 ? 1/sqrt(m00) : 0 */
#if defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)
CvMoments(){}
CvMoments(const cv::Moments& m)
{
m00 = m.m00; m10 = m.m10; m01 = m.m01;
m20 = m.m20; m11 = m.m11; m02 = m.m02;
m30 = m.m30; m21 = m.m21; m12 = m.m12; m03 = m.m03;
mu20 = m.mu20; mu11 = m.mu11; mu02 = m.mu02;
mu30 = m.mu30; mu21 = m.mu21; mu12 = m.mu12; mu03 = m.mu03;
double am00 = std::abs(m.m00);
inv_sqrt_m00 = am00 > DBL_EPSILON ? 1./std::sqrt(am00) : 0;
}
operator cv::Moments() const
{
return cv::Moments(m00, m10, m01, m20, m11, m02, m30, m21, m12, m03);
}
#endif
} }
CvMoments; CvMoments;
/* Hu invariants */ #ifdef __cplusplus
} // extern "C"
CV_INLINE CvMoments cvMoments()
{
#if !defined(CV__ENABLE_C_API_CTORS)
CvMoments self = CV_STRUCT_INITIALIZER; return self;
#else
return CvMoments();
#endif
}
CV_INLINE CvMoments cvMoments(const cv::Moments& m)
{
#if !defined(CV__ENABLE_C_API_CTORS)
double am00 = std::abs(m.m00);
CvMoments self = {
m.m00, m.m10, m.m01, m.m20, m.m11, m.m02, m.m30, m.m21, m.m12, m.m03,
m.mu20, m.mu11, m.mu02, m.mu30, m.mu21, m.mu12, m.mu03,
am00 > DBL_EPSILON ? 1./std::sqrt(am00) : 0
};
return self;
#else
return CvMoments(m);
#endif
}
extern "C" {
#endif // __cplusplus
/** Hu invariants */
typedef struct CvHuMoments typedef struct CvHuMoments
{ {
double hu1, hu2, hu3, hu4, hu5, hu6, hu7; /* Hu invariants */ double hu1, hu2, hu3, hu4, hu5, hu6, hu7; /**< Hu invariants */
} }
CvHuMoments; CvHuMoments;
/* Template matching methods */ /** Template matching methods */
enum enum
{ {
CV_TM_SQDIFF =0, CV_TM_SQDIFF =0,
@ -395,7 +480,7 @@ enum
typedef float (CV_CDECL * CvDistanceFunction)( const float* a, const float* b, void* user_param ); typedef float (CV_CDECL * CvDistanceFunction)( const float* a, const float* b, void* user_param );
/* Contour retrieval modes */ /** Contour retrieval modes */
enum enum
{ {
CV_RETR_EXTERNAL=0, CV_RETR_EXTERNAL=0,
@ -405,7 +490,7 @@ enum
CV_RETR_FLOODFILL=4 CV_RETR_FLOODFILL=4
}; };
/* Contour approximation methods */ /** Contour approximation methods */
enum enum
{ {
CV_CHAIN_CODE=0, CV_CHAIN_CODE=0,
@ -417,12 +502,12 @@ enum
}; };
/* /*
Internal structure that is used for sequental retrieving contours from the image. Internal structure that is used for sequential retrieving contours from the image.
It supports both hierarchical and plane variants of Suzuki algorithm. It supports both hierarchical and plane variants of Suzuki algorithm.
*/ */
typedef struct _CvContourScanner* CvContourScanner; typedef struct _CvContourScanner* CvContourScanner;
/* Freeman chain reader state */ /** Freeman chain reader state */
typedef struct CvChainPtReader typedef struct CvChainPtReader
{ {
CV_SEQ_READER_FIELDS() CV_SEQ_READER_FIELDS()
@ -432,7 +517,7 @@ typedef struct CvChainPtReader
} }
CvChainPtReader; CvChainPtReader;
/* initializes 8-element array for fast access to 3x3 neighborhood of a pixel */ /** initializes 8-element array for fast access to 3x3 neighborhood of a pixel */
#define CV_INIT_3X3_DELTAS( deltas, step, nch ) \ #define CV_INIT_3X3_DELTAS( deltas, step, nch ) \
((deltas)[0] = (nch), (deltas)[1] = -(step) + (nch), \ ((deltas)[0] = (nch), (deltas)[1] = -(step) + (nch), \
(deltas)[2] = -(step), (deltas)[3] = -(step) - (nch), \ (deltas)[2] = -(step), (deltas)[3] = -(step) - (nch), \
@ -440,94 +525,21 @@ CvChainPtReader;
(deltas)[6] = (step), (deltas)[7] = (step) + (nch)) (deltas)[6] = (step), (deltas)[7] = (step) + (nch))
/****************************************************************************************\ /** Contour approximation algorithms */
* Planar subdivisions *
\****************************************************************************************/
typedef size_t CvSubdiv2DEdge;
#define CV_QUADEDGE2D_FIELDS() \
int flags; \
struct CvSubdiv2DPoint* pt[4]; \
CvSubdiv2DEdge next[4];
#define CV_SUBDIV2D_POINT_FIELDS()\
int flags; \
CvSubdiv2DEdge first; \
CvPoint2D32f pt; \
int id;
#define CV_SUBDIV2D_VIRTUAL_POINT_FLAG (1 << 30)
typedef struct CvQuadEdge2D
{
CV_QUADEDGE2D_FIELDS()
}
CvQuadEdge2D;
typedef struct CvSubdiv2DPoint
{
CV_SUBDIV2D_POINT_FIELDS()
}
CvSubdiv2DPoint;
#define CV_SUBDIV2D_FIELDS() \
CV_GRAPH_FIELDS() \
int quad_edges; \
int is_geometry_valid; \
CvSubdiv2DEdge recent_edge; \
CvPoint2D32f topleft; \
CvPoint2D32f bottomright;
typedef struct CvSubdiv2D
{
CV_SUBDIV2D_FIELDS()
}
CvSubdiv2D;
typedef enum CvSubdiv2DPointLocation
{
CV_PTLOC_ERROR = -2,
CV_PTLOC_OUTSIDE_RECT = -1,
CV_PTLOC_INSIDE = 0,
CV_PTLOC_VERTEX = 1,
CV_PTLOC_ON_EDGE = 2
}
CvSubdiv2DPointLocation;
typedef enum CvNextEdgeType
{
CV_NEXT_AROUND_ORG = 0x00,
CV_NEXT_AROUND_DST = 0x22,
CV_PREV_AROUND_ORG = 0x11,
CV_PREV_AROUND_DST = 0x33,
CV_NEXT_AROUND_LEFT = 0x13,
CV_NEXT_AROUND_RIGHT = 0x31,
CV_PREV_AROUND_LEFT = 0x20,
CV_PREV_AROUND_RIGHT = 0x02
}
CvNextEdgeType;
/* get the next edge with the same origin point (counterwise) */
#define CV_SUBDIV2D_NEXT_EDGE( edge ) (((CvQuadEdge2D*)((edge) & ~3))->next[(edge)&3])
/* Contour approximation algorithms */
enum enum
{ {
CV_POLY_APPROX_DP = 0 CV_POLY_APPROX_DP = 0
}; };
/* Shape matching methods */ /** Shape matching methods */
enum enum
{ {
CV_CONTOURS_MATCH_I1 =1, CV_CONTOURS_MATCH_I1 =1, //!< \f[I_1(A,B) = \sum _{i=1...7} \left | \frac{1}{m^A_i} - \frac{1}{m^B_i} \right |\f]
CV_CONTOURS_MATCH_I2 =2, CV_CONTOURS_MATCH_I2 =2, //!< \f[I_2(A,B) = \sum _{i=1...7} \left | m^A_i - m^B_i \right |\f]
CV_CONTOURS_MATCH_I3 =3 CV_CONTOURS_MATCH_I3 =3 //!< \f[I_3(A,B) = \max _{i=1...7} \frac{ \left| m^A_i - m^B_i \right| }{ \left| m^A_i \right| }\f]
}; };
/* Shape orientation */ /** Shape orientation */
enum enum
{ {
CV_CLOCKWISE =1, CV_CLOCKWISE =1,
@ -535,27 +547,29 @@ enum
}; };
/* Convexity defect */ /** Convexity defect */
typedef struct CvConvexityDefect typedef struct CvConvexityDefect
{ {
CvPoint* start; /* point of the contour where the defect begins */ CvPoint* start; /**< point of the contour where the defect begins */
CvPoint* end; /* point of the contour where the defect ends */ CvPoint* end; /**< point of the contour where the defect ends */
CvPoint* depth_point; /* the farthest from the convex hull point within the defect */ CvPoint* depth_point; /**< the farthest from the convex hull point within the defect */
float depth; /* distance between the farthest point and the convex hull */ float depth; /**< distance between the farthest point and the convex hull */
} CvConvexityDefect; } CvConvexityDefect;
/* Histogram comparison methods */ /** Histogram comparison methods */
enum enum
{ {
CV_COMP_CORREL =0, CV_COMP_CORREL =0,
CV_COMP_CHISQR =1, CV_COMP_CHISQR =1,
CV_COMP_INTERSECT =2, CV_COMP_INTERSECT =2,
CV_COMP_BHATTACHARYYA =3, CV_COMP_BHATTACHARYYA =3,
CV_COMP_HELLINGER =CV_COMP_BHATTACHARYYA CV_COMP_HELLINGER =CV_COMP_BHATTACHARYYA,
CV_COMP_CHISQR_ALT =4,
CV_COMP_KL_DIV =5
}; };
/* Mask size for distance transform */ /** Mask size for distance transform */
enum enum
{ {
CV_DIST_MASK_3 =3, CV_DIST_MASK_3 =3,
@ -563,48 +577,51 @@ enum
CV_DIST_MASK_PRECISE =0 CV_DIST_MASK_PRECISE =0
}; };
/* Content of output label array: connected components or pixels */ /** Content of output label array: connected components or pixels */
enum enum
{ {
CV_DIST_LABEL_CCOMP = 0, CV_DIST_LABEL_CCOMP = 0,
CV_DIST_LABEL_PIXEL = 1 CV_DIST_LABEL_PIXEL = 1
}; };
/* Distance types for Distance Transform and M-estimators */ /** Distance types for Distance Transform and M-estimators */
enum enum
{ {
CV_DIST_USER =-1, /* User defined distance */ CV_DIST_USER =-1, /**< User defined distance */
CV_DIST_L1 =1, /* distance = |x1-x2| + |y1-y2| */ CV_DIST_L1 =1, /**< distance = |x1-x2| + |y1-y2| */
CV_DIST_L2 =2, /* the simple euclidean distance */ CV_DIST_L2 =2, /**< the simple euclidean distance */
CV_DIST_C =3, /* distance = max(|x1-x2|,|y1-y2|) */ CV_DIST_C =3, /**< distance = max(|x1-x2|,|y1-y2|) */
CV_DIST_L12 =4, /* L1-L2 metric: distance = 2(sqrt(1+x*x/2) - 1)) */ CV_DIST_L12 =4, /**< L1-L2 metric: distance = 2(sqrt(1+x*x/2) - 1)) */
CV_DIST_FAIR =5, /* distance = c^2(|x|/c-log(1+|x|/c)), c = 1.3998 */ CV_DIST_FAIR =5, /**< distance = c^2(|x|/c-log(1+|x|/c)), c = 1.3998 */
CV_DIST_WELSCH =6, /* distance = c^2/2(1-exp(-(x/c)^2)), c = 2.9846 */ CV_DIST_WELSCH =6, /**< distance = c^2/2(1-exp(-(x/c)^2)), c = 2.9846 */
CV_DIST_HUBER =7 /* distance = |x|<c ? x^2/2 : c(|x|-c/2), c=1.345 */ CV_DIST_HUBER =7 /**< distance = |x|<c ? x^2/2 : c(|x|-c/2), c=1.345 */
}; };
/* Threshold types */ /** Threshold types */
enum enum
{ {
CV_THRESH_BINARY =0, /* value = value > threshold ? max_value : 0 */ CV_THRESH_BINARY =0, /**< value = value > threshold ? max_value : 0 */
CV_THRESH_BINARY_INV =1, /* value = value > threshold ? 0 : max_value */ CV_THRESH_BINARY_INV =1, /**< value = value > threshold ? 0 : max_value */
CV_THRESH_TRUNC =2, /* value = value > threshold ? threshold : value */ CV_THRESH_TRUNC =2, /**< value = value > threshold ? threshold : value */
CV_THRESH_TOZERO =3, /* value = value > threshold ? value : 0 */ CV_THRESH_TOZERO =3, /**< value = value > threshold ? value : 0 */
CV_THRESH_TOZERO_INV =4, /* value = value > threshold ? 0 : value */ CV_THRESH_TOZERO_INV =4, /**< value = value > threshold ? 0 : value */
CV_THRESH_MASK =7, CV_THRESH_MASK =7,
CV_THRESH_OTSU =8 /* use Otsu algorithm to choose the optimal threshold value; CV_THRESH_OTSU =8, /**< use Otsu algorithm to choose the optimal threshold value;
combine the flag with one of the above CV_THRESH_* values */ combine the flag with one of the above CV_THRESH_* values */
CV_THRESH_TRIANGLE =16 /**< use Triangle algorithm to choose the optimal threshold value;
combine the flag with one of the above CV_THRESH_* values, but not
with CV_THRESH_OTSU */
}; };
/* Adaptive threshold methods */ /** Adaptive threshold methods */
enum enum
{ {
CV_ADAPTIVE_THRESH_MEAN_C =0, CV_ADAPTIVE_THRESH_MEAN_C =0,
CV_ADAPTIVE_THRESH_GAUSSIAN_C =1 CV_ADAPTIVE_THRESH_GAUSSIAN_C =1
}; };
/* FloodFill flags */ /** FloodFill flags */
enum enum
{ {
CV_FLOODFILL_FIXED_RANGE =(1 << 16), CV_FLOODFILL_FIXED_RANGE =(1 << 16),
@ -612,13 +629,13 @@ enum
}; };
/* Canny edge detector flags */ /** Canny edge detector flags */
enum enum
{ {
CV_CANNY_L2_GRADIENT =(1 << 31) CV_CANNY_L2_GRADIENT =(1 << 31)
}; };
/* Variants of a Hough transform */ /** Variants of a Hough transform */
enum enum
{ {
CV_HOUGH_STANDARD =0, CV_HOUGH_STANDARD =0,
@ -633,6 +650,8 @@ struct CvFeatureTree;
struct CvLSH; struct CvLSH;
struct CvLSHOperations; struct CvLSHOperations;
/** @} */
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -40,22 +40,100 @@
// //
//M*/ //M*/
#ifndef __OPENCV_ALL_HPP__ #ifndef OPENCV_ALL_HPP
#define __OPENCV_ALL_HPP__ #define OPENCV_ALL_HPP
#include "opencv2/core/core_c.h" // File that defines what modules where included during the build of OpenCV
#include "opencv2/core/core.hpp" // These are purely the defines of the correct HAVE_OPENCV_modulename values
#include "opencv2/flann/miniflann.hpp" #include "opencv2/opencv_modules.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/imgproc/imgproc.hpp" // Then the list of defines is checked to include the correct headers
#include "opencv2/photo/photo.hpp" // Core library is always included --> without no OpenCV functionality available
#include "opencv2/video/video.hpp" #include "opencv2/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/objdetect/objdetect.hpp" // Then the optional modules are checked
#include "opencv2/calib3d/calib3d.hpp" #ifdef HAVE_OPENCV_CALIB3D
#include "opencv2/ml/ml.hpp" #include "opencv2/calib3d.hpp"
#include "opencv2/highgui/highgui_c.h" #endif
#include "opencv2/highgui/highgui.hpp" #ifdef HAVE_OPENCV_FEATURES2D
#include "opencv2/contrib/contrib.hpp" #include "opencv2/features2d.hpp"
#endif
#ifdef HAVE_OPENCV_DNN
#include "opencv2/dnn.hpp"
#endif
#ifdef HAVE_OPENCV_FLANN
#include "opencv2/flann.hpp"
#endif
#ifdef HAVE_OPENCV_HIGHGUI
#include "opencv2/highgui.hpp"
#endif
#ifdef HAVE_OPENCV_IMGCODECS
#include "opencv2/imgcodecs.hpp"
#endif
#ifdef HAVE_OPENCV_IMGPROC
#include "opencv2/imgproc.hpp"
#endif
#ifdef HAVE_OPENCV_ML
#include "opencv2/ml.hpp"
#endif
#ifdef HAVE_OPENCV_OBJDETECT
#include "opencv2/objdetect.hpp"
#endif
#ifdef HAVE_OPENCV_PHOTO
#include "opencv2/photo.hpp"
#endif
#ifdef HAVE_OPENCV_SHAPE
#include "opencv2/shape.hpp"
#endif
#ifdef HAVE_OPENCV_STITCHING
#include "opencv2/stitching.hpp"
#endif
#ifdef HAVE_OPENCV_SUPERRES
#include "opencv2/superres.hpp"
#endif
#ifdef HAVE_OPENCV_VIDEO
#include "opencv2/video.hpp"
#endif
#ifdef HAVE_OPENCV_VIDEOIO
#include "opencv2/videoio.hpp"
#endif
#ifdef HAVE_OPENCV_VIDEOSTAB
#include "opencv2/videostab.hpp"
#endif
#ifdef HAVE_OPENCV_VIZ
#include "opencv2/viz.hpp"
#endif
// Finally CUDA specific entries are checked and added
#ifdef HAVE_OPENCV_CUDAARITHM
#include "opencv2/cudaarithm.hpp"
#endif
#ifdef HAVE_OPENCV_CUDABGSEGM
#include "opencv2/cudabgsegm.hpp"
#endif
#ifdef HAVE_OPENCV_CUDACODEC
#include "opencv2/cudacodec.hpp"
#endif
#ifdef HAVE_OPENCV_CUDAFEATURES2D
#include "opencv2/cudafeatures2d.hpp"
#endif
#ifdef HAVE_OPENCV_CUDAFILTERS
#include "opencv2/cudafilters.hpp"
#endif
#ifdef HAVE_OPENCV_CUDAIMGPROC
#include "opencv2/cudaimgproc.hpp"
#endif
#ifdef HAVE_OPENCV_CUDAOBJDETECT
#include "opencv2/cudaobjdetect.hpp"
#endif
#ifdef HAVE_OPENCV_CUDAOPTFLOW
#include "opencv2/cudaoptflow.hpp"
#endif
#ifdef HAVE_OPENCV_CUDASTEREO
#include "opencv2/cudastereo.hpp"
#endif
#ifdef HAVE_OPENCV_CUDAWARPING
#include "opencv2/cudawarping.hpp"
#endif
#endif #endif

View File

@ -6,24 +6,26 @@
* *
*/ */
// This definition means that OpenCV is built with enabled non-free code.
// For example, patented algorithms for non-profit/non-commercial use only.
/* #undef OPENCV_ENABLE_NONFREE */
#define HAVE_OPENCV_CALIB3D #define HAVE_OPENCV_CALIB3D
#define HAVE_OPENCV_CONTRIB
#define HAVE_OPENCV_CORE #define HAVE_OPENCV_CORE
#define HAVE_OPENCV_DNN
#define HAVE_OPENCV_FEATURES2D #define HAVE_OPENCV_FEATURES2D
#define HAVE_OPENCV_FLANN #define HAVE_OPENCV_FLANN
#define HAVE_OPENCV_GPU
#define HAVE_OPENCV_HIGHGUI #define HAVE_OPENCV_HIGHGUI
#define HAVE_OPENCV_IMGCODECS
#define HAVE_OPENCV_IMGPROC #define HAVE_OPENCV_IMGPROC
#define HAVE_OPENCV_LEGACY
#define HAVE_OPENCV_ML #define HAVE_OPENCV_ML
#define HAVE_OPENCV_NONFREE
#define HAVE_OPENCV_OBJDETECT #define HAVE_OPENCV_OBJDETECT
#define HAVE_OPENCV_OCL
#define HAVE_OPENCV_PHOTO #define HAVE_OPENCV_PHOTO
#define HAVE_OPENCV_SHAPE
#define HAVE_OPENCV_STITCHING #define HAVE_OPENCV_STITCHING
#define HAVE_OPENCV_SUPERRES #define HAVE_OPENCV_SUPERRES
#define HAVE_OPENCV_TS
#define HAVE_OPENCV_VIDEO #define HAVE_OPENCV_VIDEO
#define HAVE_OPENCV_VIDEOIO
#define HAVE_OPENCV_VIDEOSTAB #define HAVE_OPENCV_VIDEOSTAB

View File

@ -7,11 +7,12 @@
// copy or use the software. // copy or use the software.
// //
// //
// License Agreement // License Agreement
// For Open Source Computer Vision Library // For Open Source Computer Vision Library
// //
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// //
// Redistribution and use in source and binary forms, with or without modification, // Redistribution and use in source and binary forms, with or without modification,
@ -40,52 +41,8 @@
// //
//M*/ //M*/
#ifndef __OPENCV_PHOTO_HPP__ #ifdef __OPENCV_BUILD
#define __OPENCV_PHOTO_HPP__ #error this is a compatibility header which should not be used inside the OpenCV library
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/photo/photo_c.h"
#ifdef __cplusplus
/*! \namespace cv
Namespace where all the C++ OpenCV functionality resides
*/
namespace cv
{
//! the inpainting algorithm
enum
{
INPAINT_NS=CV_INPAINT_NS, // Navier-Stokes algorithm
INPAINT_TELEA=CV_INPAINT_TELEA // A. Telea algorithm
};
//! restores the damaged image areas using one of the available intpainting algorithms
CV_EXPORTS_W void inpaint( InputArray src, InputArray inpaintMask,
OutputArray dst, double inpaintRadius, int flags );
CV_EXPORTS_W void fastNlMeansDenoising( InputArray src, OutputArray dst, float h = 3,
int templateWindowSize = 7, int searchWindowSize = 21);
CV_EXPORTS_W void fastNlMeansDenoisingColored( InputArray src, OutputArray dst,
float h = 3, float hColor = 3,
int templateWindowSize = 7, int searchWindowSize = 21);
CV_EXPORTS_W void fastNlMeansDenoisingMulti( InputArrayOfArrays srcImgs, OutputArray dst,
int imgToDenoiseIndex, int temporalWindowSize,
float h = 3, int templateWindowSize = 7, int searchWindowSize = 21);
CV_EXPORTS_W void fastNlMeansDenoisingColoredMulti( InputArrayOfArrays srcImgs, OutputArray dst,
int imgToDenoiseIndex, int temporalWindowSize,
float h = 3, float hColor = 3,
int templateWindowSize = 7, int searchWindowSize = 21);
}
#endif //__cplusplus
#endif #endif
#include "opencv2/photo.hpp"

View File

@ -40,8 +40,8 @@
// //
//M*/ //M*/
#ifndef __OPENCV_PHOTO_C_H__ #ifndef OPENCV_PHOTO_C_H
#define __OPENCV_PHOTO_C_H__ #define OPENCV_PHOTO_C_H
#include "opencv2/core/core_c.h" #include "opencv2/core/core_c.h"
@ -49,8 +49,12 @@
extern "C" { extern "C" {
#endif #endif
/** @addtogroup photo_c
@{
*/
/* Inpainting algorithms */ /* Inpainting algorithms */
enum enum InpaintingModes
{ {
CV_INPAINT_NS =0, CV_INPAINT_NS =0,
CV_INPAINT_TELEA =1 CV_INPAINT_TELEA =1
@ -61,9 +65,10 @@ enum
CVAPI(void) cvInpaint( const CvArr* src, const CvArr* inpaint_mask, CVAPI(void) cvInpaint( const CvArr* src, const CvArr* inpaint_mask,
CvArr* dst, double inpaintRange, int flags ); CvArr* dst, double inpaintRange, int flags );
/** @} */
#ifdef __cplusplus #ifdef __cplusplus
} //extern "C" } //extern "C"
#endif #endif
#endif //__OPENCV_PHOTO_C_H__ #endif //OPENCV_PHOTO_C_H

View File

@ -40,26 +40,47 @@
// //
//M*/ //M*/
#ifndef __OPENCV_STITCHING_AUTOCALIB_HPP__ #ifndef OPENCV_STITCHING_AUTOCALIB_HPP
#define __OPENCV_STITCHING_AUTOCALIB_HPP__ #define OPENCV_STITCHING_AUTOCALIB_HPP
#include "opencv2/core/core.hpp" #include "opencv2/core.hpp"
#include "matchers.hpp" #include "matchers.hpp"
namespace cv { namespace cv {
namespace detail { namespace detail {
// See "Construction of Panoramic Image Mosaics with Global and Local Alignment" //! @addtogroup stitching_autocalib
// by Heung-Yeung Shum and Richard Szeliski. //! @{
/** @brief Tries to estimate focal lengths from the given homography under the assumption that the camera
undergoes rotations around its centre only.
@param H Homography.
@param f0 Estimated focal length along X axis.
@param f1 Estimated focal length along Y axis.
@param f0_ok True, if f0 was estimated successfully, false otherwise.
@param f1_ok True, if f1 was estimated successfully, false otherwise.
See "Construction of Panoramic Image Mosaics with Global and Local Alignment"
by Heung-Yeung Shum and Richard Szeliski.
*/
void CV_EXPORTS focalsFromHomography(const Mat &H, double &f0, double &f1, bool &f0_ok, bool &f1_ok); void CV_EXPORTS focalsFromHomography(const Mat &H, double &f0, double &f1, bool &f0_ok, bool &f1_ok);
/** @brief Estimates focal lengths for each given camera.
@param features Features of images.
@param pairwise_matches Matches between all image pairs.
@param focals Estimated focal lengths for each camera.
*/
void CV_EXPORTS estimateFocal(const std::vector<ImageFeatures> &features, void CV_EXPORTS estimateFocal(const std::vector<ImageFeatures> &features,
const std::vector<MatchesInfo> &pairwise_matches, const std::vector<MatchesInfo> &pairwise_matches,
std::vector<double> &focals); std::vector<double> &focals);
bool CV_EXPORTS calibrateRotatingCamera(const std::vector<Mat> &Hs, Mat &K); bool CV_EXPORTS calibrateRotatingCamera(const std::vector<Mat> &Hs, Mat &K);
//! @} stitching_autocalib
} // namespace detail } // namespace detail
} // namespace cv } // namespace cv
#endif // __OPENCV_STITCHING_AUTOCALIB_HPP__ #endif // OPENCV_STITCHING_AUTOCALIB_HPP

View File

@ -40,16 +40,26 @@
// //
//M*/ //M*/
#ifndef __OPENCV_STITCHING_BLENDERS_HPP__ #ifndef OPENCV_STITCHING_BLENDERS_HPP
#define __OPENCV_STITCHING_BLENDERS_HPP__ #define OPENCV_STITCHING_BLENDERS_HPP
#include "opencv2/core/core.hpp" #if defined(NO)
# warning Detected Apple 'NO' macro definition, it can cause build conflicts. Please, include this header before any Apple headers.
#endif
#include "opencv2/core.hpp"
#include "opencv2/core/cuda.hpp"
namespace cv { namespace cv {
namespace detail { namespace detail {
//! @addtogroup stitching_blend
//! @{
// Simple blender which puts one image over another /** @brief Base class for all blenders.
Simple blender which puts one image over another
*/
class CV_EXPORTS Blender class CV_EXPORTS Blender
{ {
public: public:
@ -58,17 +68,35 @@ public:
enum { NO, FEATHER, MULTI_BAND }; enum { NO, FEATHER, MULTI_BAND };
static Ptr<Blender> createDefault(int type, bool try_gpu = false); static Ptr<Blender> createDefault(int type, bool try_gpu = false);
/** @brief Prepares the blender for blending.
@param corners Source images top-left corners
@param sizes Source image sizes
*/
void prepare(const std::vector<Point> &corners, const std::vector<Size> &sizes); void prepare(const std::vector<Point> &corners, const std::vector<Size> &sizes);
/** @overload */
virtual void prepare(Rect dst_roi); virtual void prepare(Rect dst_roi);
virtual void feed(const Mat &img, const Mat &mask, Point tl); /** @brief Processes the image.
virtual void blend(Mat &dst, Mat &dst_mask);
@param img Source image
@param mask Source image mask
@param tl Source image top-left corners
*/
virtual void feed(InputArray img, InputArray mask, Point tl);
/** @brief Blends and returns the final pano.
@param dst Final pano
@param dst_mask Final pano mask
*/
virtual void blend(InputOutputArray dst, InputOutputArray dst_mask);
protected: protected:
Mat dst_, dst_mask_; UMat dst_, dst_mask_;
Rect dst_roi_; Rect dst_roi_;
}; };
/** @brief Simple blender which mixes images at its borders.
*/
class CV_EXPORTS FeatherBlender : public Blender class CV_EXPORTS FeatherBlender : public Blender
{ {
public: public:
@ -77,24 +105,25 @@ public:
float sharpness() const { return sharpness_; } float sharpness() const { return sharpness_; }
void setSharpness(float val) { sharpness_ = val; } void setSharpness(float val) { sharpness_ = val; }
void prepare(Rect dst_roi); void prepare(Rect dst_roi) CV_OVERRIDE;
void feed(const Mat &img, const Mat &mask, Point tl); void feed(InputArray img, InputArray mask, Point tl) CV_OVERRIDE;
void blend(Mat &dst, Mat &dst_mask); void blend(InputOutputArray dst, InputOutputArray dst_mask) CV_OVERRIDE;
// Creates weight maps for fixed set of source images by their masks and top-left corners. //! Creates weight maps for fixed set of source images by their masks and top-left corners.
// Final image can be obtained by simple weighting of the source images. //! Final image can be obtained by simple weighting of the source images.
Rect createWeightMaps(const std::vector<Mat> &masks, const std::vector<Point> &corners, Rect createWeightMaps(const std::vector<UMat> &masks, const std::vector<Point> &corners,
std::vector<Mat> &weight_maps); std::vector<UMat> &weight_maps);
private: private:
float sharpness_; float sharpness_;
Mat weight_map_; UMat weight_map_;
Mat dst_weight_map_; UMat dst_weight_map_;
}; };
inline FeatherBlender::FeatherBlender(float _sharpness) { setSharpness(_sharpness); } inline FeatherBlender::FeatherBlender(float _sharpness) { setSharpness(_sharpness); }
/** @brief Blender which uses multi-band blending algorithm (see @cite BA83).
*/
class CV_EXPORTS MultiBandBlender : public Blender class CV_EXPORTS MultiBandBlender : public Blender
{ {
public: public:
@ -103,35 +132,53 @@ public:
int numBands() const { return actual_num_bands_; } int numBands() const { return actual_num_bands_; }
void setNumBands(int val) { actual_num_bands_ = val; } void setNumBands(int val) { actual_num_bands_ = val; }
void prepare(Rect dst_roi); void prepare(Rect dst_roi) CV_OVERRIDE;
void feed(const Mat &img, const Mat &mask, Point tl); void feed(InputArray img, InputArray mask, Point tl) CV_OVERRIDE;
void blend(Mat &dst, Mat &dst_mask); void blend(InputOutputArray dst, InputOutputArray dst_mask) CV_OVERRIDE;
private: private:
int actual_num_bands_, num_bands_; int actual_num_bands_, num_bands_;
std::vector<Mat> dst_pyr_laplace_; std::vector<UMat> dst_pyr_laplace_;
std::vector<Mat> dst_band_weights_; std::vector<UMat> dst_band_weights_;
Rect dst_roi_final_; Rect dst_roi_final_;
bool can_use_gpu_; bool can_use_gpu_;
int weight_type_; //CV_32F or CV_16S int weight_type_; //CV_32F or CV_16S
#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
std::vector<cuda::GpuMat> gpu_dst_pyr_laplace_;
std::vector<cuda::GpuMat> gpu_dst_band_weights_;
std::vector<Point> gpu_tl_points_;
std::vector<cuda::GpuMat> gpu_imgs_with_border_;
std::vector<std::vector<cuda::GpuMat> > gpu_weight_pyr_gauss_vec_;
std::vector<std::vector<cuda::GpuMat> > gpu_src_pyr_laplace_vec_;
std::vector<std::vector<cuda::GpuMat> > gpu_ups_;
cuda::GpuMat gpu_dst_mask_;
cuda::GpuMat gpu_mask_;
cuda::GpuMat gpu_img_;
cuda::GpuMat gpu_weight_map_;
cuda::GpuMat gpu_add_mask_;
int gpu_feed_idx_;
bool gpu_initialized_;
#endif
}; };
////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////
// Auxiliary functions // Auxiliary functions
void CV_EXPORTS normalizeUsingWeightMap(const Mat& weight, Mat& src); void CV_EXPORTS normalizeUsingWeightMap(InputArray weight, InputOutputArray src);
void CV_EXPORTS createWeightMap(const Mat& mask, float sharpness, Mat& weight); void CV_EXPORTS createWeightMap(InputArray mask, float sharpness, InputOutputArray weight);
void CV_EXPORTS createLaplacePyr(const Mat &img, int num_levels, std::vector<Mat>& pyr); void CV_EXPORTS createLaplacePyr(InputArray img, int num_levels, std::vector<UMat>& pyr);
void CV_EXPORTS createLaplacePyrGpu(const Mat &img, int num_levels, std::vector<Mat>& pyr); void CV_EXPORTS createLaplacePyrGpu(InputArray img, int num_levels, std::vector<UMat>& pyr);
// Restores source image // Restores source image
void CV_EXPORTS restoreImageFromLaplacePyr(std::vector<Mat>& pyr); void CV_EXPORTS restoreImageFromLaplacePyr(std::vector<UMat>& pyr);
void CV_EXPORTS restoreImageFromLaplacePyrGpu(std::vector<Mat>& pyr); void CV_EXPORTS restoreImageFromLaplacePyrGpu(std::vector<UMat>& pyr);
//! @}
} // namespace detail } // namespace detail
} // namespace cv } // namespace cv
#endif // __OPENCV_STITCHING_BLENDERS_HPP__ #endif // OPENCV_STITCHING_BLENDERS_HPP

View File

@ -40,19 +40,26 @@
// //
//M*/ //M*/
#ifndef __OPENCV_STITCHING_CAMERA_HPP__ #ifndef OPENCV_STITCHING_CAMERA_HPP
#define __OPENCV_STITCHING_CAMERA_HPP__ #define OPENCV_STITCHING_CAMERA_HPP
#include "opencv2/core/core.hpp" #include "opencv2/core.hpp"
namespace cv { namespace cv {
namespace detail { namespace detail {
//! @addtogroup stitching
//! @{
/** @brief Describes camera parameters.
@note Translation is assumed to be zero during the whole stitching pipeline. :
*/
struct CV_EXPORTS CameraParams struct CV_EXPORTS CameraParams
{ {
CameraParams(); CameraParams();
CameraParams(const CameraParams& other); CameraParams(const CameraParams& other);
const CameraParams& operator =(const CameraParams& other); CameraParams& operator =(const CameraParams& other);
Mat K() const; Mat K() const;
double focal; // Focal length double focal; // Focal length
@ -63,7 +70,9 @@ struct CV_EXPORTS CameraParams
Mat t; // Translation Mat t; // Translation
}; };
//! @}
} // namespace detail } // namespace detail
} // namespace cv } // namespace cv
#endif // #ifndef __OPENCV_STITCHING_CAMERA_HPP__ #endif // #ifndef OPENCV_STITCHING_CAMERA_HPP

View File

@ -40,14 +40,23 @@
// //
//M*/ //M*/
#ifndef __OPENCV_STITCHING_EXPOSURE_COMPENSATE_HPP__ #ifndef OPENCV_STITCHING_EXPOSURE_COMPENSATE_HPP
#define __OPENCV_STITCHING_EXPOSURE_COMPENSATE_HPP__ #define OPENCV_STITCHING_EXPOSURE_COMPENSATE_HPP
#include "opencv2/core/core.hpp" #if defined(NO)
# warning Detected Apple 'NO' macro definition, it can cause build conflicts. Please, include this header before any Apple headers.
#endif
#include "opencv2/core.hpp"
namespace cv { namespace cv {
namespace detail { namespace detail {
//! @addtogroup stitching_exposure
//! @{
/** @brief Base class for all exposure compensators.
*/
class CV_EXPORTS ExposureCompensator class CV_EXPORTS ExposureCompensator
{ {
public: public:
@ -56,51 +65,72 @@ public:
enum { NO, GAIN, GAIN_BLOCKS }; enum { NO, GAIN, GAIN_BLOCKS };
static Ptr<ExposureCompensator> createDefault(int type); static Ptr<ExposureCompensator> createDefault(int type);
void feed(const std::vector<Point> &corners, const std::vector<Mat> &images, /**
const std::vector<Mat> &masks); @param corners Source image top-left corners
virtual void feed(const std::vector<Point> &corners, const std::vector<Mat> &images, @param images Source images
const std::vector<std::pair<Mat,uchar> > &masks) = 0; @param masks Image masks to update (second value in pair specifies the value which should be used
virtual void apply(int index, Point corner, Mat &image, const Mat &mask) = 0; to detect where image is)
*/
void feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<UMat> &masks);
/** @overload */
virtual void feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<std::pair<UMat,uchar> > &masks) = 0;
/** @brief Compensate exposure in the specified image.
@param index Image index
@param corner Image top-left corner
@param image Image to process
@param mask Image mask
*/
virtual void apply(int index, Point corner, InputOutputArray image, InputArray mask) = 0;
}; };
/** @brief Stub exposure compensator which does nothing.
*/
class CV_EXPORTS NoExposureCompensator : public ExposureCompensator class CV_EXPORTS NoExposureCompensator : public ExposureCompensator
{ {
public: public:
void feed(const std::vector<Point> &/*corners*/, const std::vector<Mat> &/*images*/, void feed(const std::vector<Point> &/*corners*/, const std::vector<UMat> &/*images*/,
const std::vector<std::pair<Mat,uchar> > &/*masks*/) {}; const std::vector<std::pair<UMat,uchar> > &/*masks*/) CV_OVERRIDE { }
void apply(int /*index*/, Point /*corner*/, Mat &/*image*/, const Mat &/*mask*/) {}; void apply(int /*index*/, Point /*corner*/, InputOutputArray /*image*/, InputArray /*mask*/) CV_OVERRIDE { }
}; };
/** @brief Exposure compensator which tries to remove exposure related artifacts by adjusting image
intensities, see @cite BL07 and @cite WJ10 for details.
*/
class CV_EXPORTS GainCompensator : public ExposureCompensator class CV_EXPORTS GainCompensator : public ExposureCompensator
{ {
public: public:
void feed(const std::vector<Point> &corners, const std::vector<Mat> &images, void feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<std::pair<Mat,uchar> > &masks); const std::vector<std::pair<UMat,uchar> > &masks) CV_OVERRIDE;
void apply(int index, Point corner, Mat &image, const Mat &mask); void apply(int index, Point corner, InputOutputArray image, InputArray mask) CV_OVERRIDE;
std::vector<double> gains() const; std::vector<double> gains() const;
private: private:
Mat_<double> gains_; Mat_<double> gains_;
}; };
/** @brief Exposure compensator which tries to remove exposure related artifacts by adjusting image block
intensities, see @cite UES01 for details.
*/
class CV_EXPORTS BlocksGainCompensator : public ExposureCompensator class CV_EXPORTS BlocksGainCompensator : public ExposureCompensator
{ {
public: public:
BlocksGainCompensator(int bl_width = 32, int bl_height = 32) BlocksGainCompensator(int bl_width = 32, int bl_height = 32)
: bl_width_(bl_width), bl_height_(bl_height) {} : bl_width_(bl_width), bl_height_(bl_height) {}
void feed(const std::vector<Point> &corners, const std::vector<Mat> &images, void feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<std::pair<Mat,uchar> > &masks); const std::vector<std::pair<UMat,uchar> > &masks) CV_OVERRIDE;
void apply(int index, Point corner, Mat &image, const Mat &mask); void apply(int index, Point corner, InputOutputArray image, InputArray mask) CV_OVERRIDE;
private: private:
int bl_width_, bl_height_; int bl_width_, bl_height_;
std::vector<Mat_<float> > gain_maps_; std::vector<UMat> gain_maps_;
}; };
//! @}
} // namespace detail } // namespace detail
} // namespace cv } // namespace cv
#endif // __OPENCV_STITCHING_EXPOSURE_COMPENSATE_HPP__ #endif // OPENCV_STITCHING_EXPOSURE_COMPENSATE_HPP

View File

@ -40,44 +40,89 @@
// //
//M*/ //M*/
#ifndef __OPENCV_STITCHING_MATCHERS_HPP__ #ifndef OPENCV_STITCHING_MATCHERS_HPP
#define __OPENCV_STITCHING_MATCHERS_HPP__ #define OPENCV_STITCHING_MATCHERS_HPP
#include "opencv2/core/core.hpp" #include "opencv2/core.hpp"
#include "opencv2/core/gpumat.hpp" #include "opencv2/features2d.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/opencv_modules.hpp" #include "opencv2/opencv_modules.hpp"
#if defined(HAVE_OPENCV_NONFREE) #ifdef HAVE_OPENCV_XFEATURES2D
#include "opencv2/nonfree/gpu.hpp" # include "opencv2/xfeatures2d/cuda.hpp"
#endif #endif
namespace cv { namespace cv {
namespace detail { namespace detail {
//! @addtogroup stitching_match
//! @{
/** @brief Structure containing image keypoints and descriptors. */
struct CV_EXPORTS ImageFeatures struct CV_EXPORTS ImageFeatures
{ {
int img_idx; int img_idx;
Size img_size; Size img_size;
std::vector<KeyPoint> keypoints; std::vector<KeyPoint> keypoints;
Mat descriptors; UMat descriptors;
}; };
/** @brief Feature finders base class */
class CV_EXPORTS FeaturesFinder class CV_EXPORTS FeaturesFinder
{ {
public: public:
virtual ~FeaturesFinder() {} virtual ~FeaturesFinder() {}
void operator ()(const Mat &image, ImageFeatures &features); /** @overload */
void operator ()(const Mat &image, ImageFeatures &features, const std::vector<cv::Rect> &rois); void operator ()(InputArray image, ImageFeatures &features);
/** @brief Finds features in the given image.
@param image Source image
@param features Found features
@param rois Regions of interest
@sa detail::ImageFeatures, Rect_
*/
void operator ()(InputArray image, ImageFeatures &features, const std::vector<cv::Rect> &rois);
/** @brief Finds features in the given images in parallel.
@param images Source images
@param features Found features for each image
@param rois Regions of interest for each image
@sa detail::ImageFeatures, Rect_
*/
void operator ()(InputArrayOfArrays images, std::vector<ImageFeatures> &features,
const std::vector<std::vector<cv::Rect> > &rois);
/** @overload */
void operator ()(InputArrayOfArrays images, std::vector<ImageFeatures> &features);
/** @brief Frees unused memory allocated before if there is any. */
virtual void collectGarbage() {} virtual void collectGarbage() {}
/* TODO OpenCV ABI 4.x
reimplement this as public method similar to FeaturesMatcher and remove private function hack
@return True, if it's possible to use the same finder instance in parallel, false otherwise
bool isThreadSafe() const { return is_thread_safe_; }
*/
protected: protected:
virtual void find(const Mat &image, ImageFeatures &features) = 0; /** @brief This method must implement features finding logic in order to make the wrappers
detail::FeaturesFinder::operator()_ work.
@param image Source image
@param features Found features
@sa detail::ImageFeatures */
virtual void find(InputArray image, ImageFeatures &features) = 0;
/** @brief uses dynamic_cast to determine thread-safety
@return True, if it's possible to use the same finder instance in parallel, false otherwise
*/
bool isThreadSafe() const;
}; };
/** @brief SURF features finder.
@sa detail::FeaturesFinder, SURF
*/
class CV_EXPORTS SurfFeaturesFinder : public FeaturesFinder class CV_EXPORTS SurfFeaturesFinder : public FeaturesFinder
{ {
public: public:
@ -85,106 +130,241 @@ public:
int num_octaves_descr = /*4*/3, int num_layers_descr = /*2*/4); int num_octaves_descr = /*4*/3, int num_layers_descr = /*2*/4);
private: private:
void find(const Mat &image, ImageFeatures &features); void find(InputArray image, ImageFeatures &features) CV_OVERRIDE;
Ptr<FeatureDetector> detector_; Ptr<FeatureDetector> detector_;
Ptr<DescriptorExtractor> extractor_; Ptr<DescriptorExtractor> extractor_;
Ptr<Feature2D> surf; Ptr<Feature2D> surf;
}; };
/** @brief SIFT features finder.
@sa detail::FeaturesFinder, SIFT
*/
class CV_EXPORTS SiftFeaturesFinder : public FeaturesFinder
{
public:
SiftFeaturesFinder();
private:
void find(InputArray image, ImageFeatures &features) CV_OVERRIDE;
Ptr<Feature2D> sift;
};
/** @brief ORB features finder. :
@sa detail::FeaturesFinder, ORB
*/
class CV_EXPORTS OrbFeaturesFinder : public FeaturesFinder class CV_EXPORTS OrbFeaturesFinder : public FeaturesFinder
{ {
public: public:
OrbFeaturesFinder(Size _grid_size = Size(3,1), int nfeatures=1500, float scaleFactor=1.3f, int nlevels=5); OrbFeaturesFinder(Size _grid_size = Size(3,1), int nfeatures=1500, float scaleFactor=1.3f, int nlevels=5);
private: private:
void find(const Mat &image, ImageFeatures &features); void find(InputArray image, ImageFeatures &features) CV_OVERRIDE;
Ptr<ORB> orb; Ptr<ORB> orb;
Size grid_size; Size grid_size;
}; };
/** @brief AKAZE features finder. :
#if defined(HAVE_OPENCV_NONFREE) @sa detail::FeaturesFinder, AKAZE
*/
class CV_EXPORTS AKAZEFeaturesFinder : public detail::FeaturesFinder
{
public:
AKAZEFeaturesFinder(int descriptor_type = AKAZE::DESCRIPTOR_MLDB,
int descriptor_size = 0,
int descriptor_channels = 3,
float threshold = 0.001f,
int nOctaves = 4,
int nOctaveLayers = 4,
int diffusivity = KAZE::DIFF_PM_G2);
private:
void find(InputArray image, ImageFeatures &features) CV_OVERRIDE;
Ptr<AKAZE> akaze;
};
#ifdef HAVE_OPENCV_XFEATURES2D
class CV_EXPORTS SurfFeaturesFinderGpu : public FeaturesFinder class CV_EXPORTS SurfFeaturesFinderGpu : public FeaturesFinder
{ {
public: public:
SurfFeaturesFinderGpu(double hess_thresh = 300., int num_octaves = 3, int num_layers = 4, SurfFeaturesFinderGpu(double hess_thresh = 300., int num_octaves = 3, int num_layers = 4,
int num_octaves_descr = 4, int num_layers_descr = 2); int num_octaves_descr = 4, int num_layers_descr = 2);
void collectGarbage(); void collectGarbage() CV_OVERRIDE;
private: private:
void find(const Mat &image, ImageFeatures &features); void find(InputArray image, ImageFeatures &features) CV_OVERRIDE;
gpu::GpuMat image_; cuda::GpuMat image_;
gpu::GpuMat gray_image_; cuda::GpuMat gray_image_;
gpu::SURF_GPU surf_; cuda::SURF_CUDA surf_;
gpu::GpuMat keypoints_; cuda::GpuMat keypoints_;
gpu::GpuMat descriptors_; cuda::GpuMat descriptors_;
int num_octaves_, num_layers_; int num_octaves_, num_layers_;
int num_octaves_descr_, num_layers_descr_; int num_octaves_descr_, num_layers_descr_;
}; };
#endif #endif
/** @brief Structure containing information about matches between two images.
It's assumed that there is a transformation between those images. Transformation may be
homography or affine transformation based on selected matcher.
@sa detail::FeaturesMatcher
*/
struct CV_EXPORTS MatchesInfo struct CV_EXPORTS MatchesInfo
{ {
MatchesInfo(); MatchesInfo();
MatchesInfo(const MatchesInfo &other); MatchesInfo(const MatchesInfo &other);
const MatchesInfo& operator =(const MatchesInfo &other); MatchesInfo& operator =(const MatchesInfo &other);
int src_img_idx, dst_img_idx; // Images indices (optional) int src_img_idx, dst_img_idx; //!< Images indices (optional)
std::vector<DMatch> matches; std::vector<DMatch> matches;
std::vector<uchar> inliers_mask; // Geometrically consistent matches mask std::vector<uchar> inliers_mask; //!< Geometrically consistent matches mask
int num_inliers; // Number of geometrically consistent matches int num_inliers; //!< Number of geometrically consistent matches
Mat H; // Estimated homography Mat H; //!< Estimated transformation
double confidence; // Confidence two images are from the same panorama double confidence; //!< Confidence two images are from the same panorama
}; };
/** @brief Feature matchers base class. */
class CV_EXPORTS FeaturesMatcher class CV_EXPORTS FeaturesMatcher
{ {
public: public:
virtual ~FeaturesMatcher() {} virtual ~FeaturesMatcher() {}
/** @overload
@param features1 First image features
@param features2 Second image features
@param matches_info Found matches
*/
void operator ()(const ImageFeatures &features1, const ImageFeatures &features2, void operator ()(const ImageFeatures &features1, const ImageFeatures &features2,
MatchesInfo& matches_info) { match(features1, features2, matches_info); } MatchesInfo& matches_info) { match(features1, features2, matches_info); }
void operator ()(const std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches, /** @brief Performs images matching.
const cv::Mat &mask = cv::Mat());
@param features Features of the source images
@param pairwise_matches Found pairwise matches
@param mask Mask indicating which image pairs must be matched
The function is parallelized with the TBB library.
@sa detail::MatchesInfo
*/
void operator ()(const std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches,
const cv::UMat &mask = cv::UMat());
/** @return True, if it's possible to use the same matcher instance in parallel, false otherwise
*/
bool isThreadSafe() const { return is_thread_safe_; } bool isThreadSafe() const { return is_thread_safe_; }
/** @brief Frees unused memory allocated before if there is any.
*/
virtual void collectGarbage() {} virtual void collectGarbage() {}
protected: protected:
FeaturesMatcher(bool is_thread_safe = false) : is_thread_safe_(is_thread_safe) {} FeaturesMatcher(bool is_thread_safe = false) : is_thread_safe_(is_thread_safe) {}
/** @brief This method must implement matching logic in order to make the wrappers
detail::FeaturesMatcher::operator()_ work.
@param features1 first image features
@param features2 second image features
@param matches_info found matches
*/
virtual void match(const ImageFeatures &features1, const ImageFeatures &features2, virtual void match(const ImageFeatures &features1, const ImageFeatures &features2,
MatchesInfo& matches_info) = 0; MatchesInfo& matches_info) = 0;
bool is_thread_safe_; bool is_thread_safe_;
}; };
/** @brief Features matcher which finds two best matches for each feature and leaves the best one only if the
ratio between descriptor distances is greater than the threshold match_conf
@sa detail::FeaturesMatcher
*/
class CV_EXPORTS BestOf2NearestMatcher : public FeaturesMatcher class CV_EXPORTS BestOf2NearestMatcher : public FeaturesMatcher
{ {
public: public:
/** @brief Constructs a "best of 2 nearest" matcher.
@param try_use_gpu Should try to use GPU or not
@param match_conf Match distances ration threshold
@param num_matches_thresh1 Minimum number of matches required for the 2D projective transform
estimation used in the inliers classification step
@param num_matches_thresh2 Minimum number of matches required for the 2D projective transform
re-estimation on inliers
*/
BestOf2NearestMatcher(bool try_use_gpu = false, float match_conf = 0.3f, int num_matches_thresh1 = 6, BestOf2NearestMatcher(bool try_use_gpu = false, float match_conf = 0.3f, int num_matches_thresh1 = 6,
int num_matches_thresh2 = 6); int num_matches_thresh2 = 6);
void collectGarbage(); void collectGarbage() CV_OVERRIDE;
protected: protected:
void match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo &matches_info); void match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo &matches_info) CV_OVERRIDE;
int num_matches_thresh1_; int num_matches_thresh1_;
int num_matches_thresh2_; int num_matches_thresh2_;
Ptr<FeaturesMatcher> impl_; Ptr<FeaturesMatcher> impl_;
}; };
class CV_EXPORTS BestOf2NearestRangeMatcher : public BestOf2NearestMatcher
{
public:
BestOf2NearestRangeMatcher(int range_width = 5, bool try_use_gpu = false, float match_conf = 0.3f,
int num_matches_thresh1 = 6, int num_matches_thresh2 = 6);
void operator ()(const std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches,
const cv::UMat &mask = cv::UMat());
protected:
int range_width_;
};
/** @brief Features matcher similar to cv::detail::BestOf2NearestMatcher which
finds two best matches for each feature and leaves the best one only if the
ratio between descriptor distances is greater than the threshold match_conf.
Unlike cv::detail::BestOf2NearestMatcher this matcher uses affine
transformation (affine trasformation estimate will be placed in matches_info).
@sa cv::detail::FeaturesMatcher cv::detail::BestOf2NearestMatcher
*/
class CV_EXPORTS AffineBestOf2NearestMatcher : public BestOf2NearestMatcher
{
public:
/** @brief Constructs a "best of 2 nearest" matcher that expects affine trasformation
between images
@param full_affine whether to use full affine transformation with 6 degress of freedom or reduced
transformation with 4 degrees of freedom using only rotation, translation and uniform scaling
@param try_use_gpu Should try to use GPU or not
@param match_conf Match distances ration threshold
@param num_matches_thresh1 Minimum number of matches required for the 2D affine transform
estimation used in the inliers classification step
@sa cv::estimateAffine2D cv::estimateAffinePartial2D
*/
AffineBestOf2NearestMatcher(bool full_affine = false, bool try_use_gpu = false,
float match_conf = 0.3f, int num_matches_thresh1 = 6) :
BestOf2NearestMatcher(try_use_gpu, match_conf, num_matches_thresh1, num_matches_thresh1),
full_affine_(full_affine) {}
protected:
void match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo &matches_info) CV_OVERRIDE;
bool full_affine_;
};
//! @} stitching_match
} // namespace detail } // namespace detail
} // namespace cv } // namespace cv
#endif // __OPENCV_STITCHING_MATCHERS_HPP__ #endif // OPENCV_STITCHING_MATCHERS_HPP

View File

@ -40,10 +40,10 @@
// //
//M*/ //M*/
#ifndef __OPENCV_STITCHING_MOTION_ESTIMATORS_HPP__ #ifndef OPENCV_STITCHING_MOTION_ESTIMATORS_HPP
#define __OPENCV_STITCHING_MOTION_ESTIMATORS_HPP__ #define OPENCV_STITCHING_MOTION_ESTIMATORS_HPP
#include "opencv2/core/core.hpp" #include "opencv2/core.hpp"
#include "matchers.hpp" #include "matchers.hpp"
#include "util.hpp" #include "util.hpp"
#include "camera.hpp" #include "camera.hpp"
@ -51,21 +51,50 @@
namespace cv { namespace cv {
namespace detail { namespace detail {
//! @addtogroup stitching_rotation
//! @{
/** @brief Rotation estimator base class.
It takes features of all images, pairwise matches between all images and estimates rotations of all
cameras.
@note The coordinate system origin is implementation-dependent, but you can always normalize the
rotations in respect to the first camera, for instance. :
*/
class CV_EXPORTS Estimator class CV_EXPORTS Estimator
{ {
public: public:
virtual ~Estimator() {} virtual ~Estimator() {}
void operator ()(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches, /** @brief Estimates camera parameters.
@param features Features of images
@param pairwise_matches Pairwise matches of images
@param cameras Estimated camera parameters
@return True in case of success, false otherwise
*/
bool operator ()(const std::vector<ImageFeatures> &features,
const std::vector<MatchesInfo> &pairwise_matches,
std::vector<CameraParams> &cameras) std::vector<CameraParams> &cameras)
{ estimate(features, pairwise_matches, cameras); } { return estimate(features, pairwise_matches, cameras); }
protected: protected:
virtual void estimate(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches, /** @brief This method must implement camera parameters estimation logic in order to make the wrapper
detail::Estimator::operator()_ work.
@param features Features of images
@param pairwise_matches Pairwise matches of images
@param cameras Estimated camera parameters
@return True in case of success, false otherwise
*/
virtual bool estimate(const std::vector<ImageFeatures> &features,
const std::vector<MatchesInfo> &pairwise_matches,
std::vector<CameraParams> &cameras) = 0; std::vector<CameraParams> &cameras) = 0;
}; };
/** @brief Homography based rotation estimator.
*/
class CV_EXPORTS HomographyBasedEstimator : public Estimator class CV_EXPORTS HomographyBasedEstimator : public Estimator
{ {
public: public:
@ -73,13 +102,30 @@ public:
: is_focals_estimated_(is_focals_estimated) {} : is_focals_estimated_(is_focals_estimated) {}
private: private:
void estimate(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches, virtual bool estimate(const std::vector<ImageFeatures> &features,
std::vector<CameraParams> &cameras); const std::vector<MatchesInfo> &pairwise_matches,
std::vector<CameraParams> &cameras) CV_OVERRIDE;
bool is_focals_estimated_; bool is_focals_estimated_;
}; };
/** @brief Affine transformation based estimator.
This estimator uses pairwise transformations estimated by matcher to estimate
final transformation for each camera.
@sa cv::detail::HomographyBasedEstimator
*/
class CV_EXPORTS AffineBasedEstimator : public Estimator
{
private:
virtual bool estimate(const std::vector<ImageFeatures> &features,
const std::vector<MatchesInfo> &pairwise_matches,
std::vector<CameraParams> &cameras) CV_OVERRIDE;
};
/** @brief Base class for all camera parameters refinement methods.
*/
class CV_EXPORTS BundleAdjusterBase : public Estimator class CV_EXPORTS BundleAdjusterBase : public Estimator
{ {
public: public:
@ -93,27 +139,51 @@ public:
double confThresh() const { return conf_thresh_; } double confThresh() const { return conf_thresh_; }
void setConfThresh(double conf_thresh) { conf_thresh_ = conf_thresh; } void setConfThresh(double conf_thresh) { conf_thresh_ = conf_thresh; }
CvTermCriteria termCriteria() { return term_criteria_; } TermCriteria termCriteria() { return term_criteria_; }
void setTermCriteria(const CvTermCriteria& term_criteria) { term_criteria_ = term_criteria; } void setTermCriteria(const TermCriteria& term_criteria) { term_criteria_ = term_criteria; }
protected: protected:
/** @brief Construct a bundle adjuster base instance.
@param num_params_per_cam Number of parameters per camera
@param num_errs_per_measurement Number of error terms (components) per match
*/
BundleAdjusterBase(int num_params_per_cam, int num_errs_per_measurement) BundleAdjusterBase(int num_params_per_cam, int num_errs_per_measurement)
: num_params_per_cam_(num_params_per_cam), : num_images_(0), total_num_matches_(0),
num_errs_per_measurement_(num_errs_per_measurement) num_params_per_cam_(num_params_per_cam),
num_errs_per_measurement_(num_errs_per_measurement),
features_(0), pairwise_matches_(0), conf_thresh_(0)
{ {
setRefinementMask(Mat::ones(3, 3, CV_8U)); setRefinementMask(Mat::ones(3, 3, CV_8U));
setConfThresh(1.); setConfThresh(1.);
setTermCriteria(cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 1000, DBL_EPSILON)); setTermCriteria(TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 1000, DBL_EPSILON));
} }
// Runs bundle adjustment // Runs bundle adjustment
virtual void estimate(const std::vector<ImageFeatures> &features, virtual bool estimate(const std::vector<ImageFeatures> &features,
const std::vector<MatchesInfo> &pairwise_matches, const std::vector<MatchesInfo> &pairwise_matches,
std::vector<CameraParams> &cameras); std::vector<CameraParams> &cameras) CV_OVERRIDE;
/** @brief Sets initial camera parameter to refine.
@param cameras Camera parameters
*/
virtual void setUpInitialCameraParams(const std::vector<CameraParams> &cameras) = 0; virtual void setUpInitialCameraParams(const std::vector<CameraParams> &cameras) = 0;
/** @brief Gets the refined camera parameters.
@param cameras Refined camera parameters
*/
virtual void obtainRefinedCameraParams(std::vector<CameraParams> &cameras) const = 0; virtual void obtainRefinedCameraParams(std::vector<CameraParams> &cameras) const = 0;
/** @brief Calculates error vector.
@param err Error column-vector of length total_num_matches \* num_errs_per_measurement
*/
virtual void calcError(Mat &err) = 0; virtual void calcError(Mat &err) = 0;
/** @brief Calculates the cost function jacobian.
@param jac Jacobian matrix of dimensions
(total_num_matches \* num_errs_per_measurement) x (num_images \* num_params_per_cam)
*/
virtual void calcJacobian(Mat &jac) = 0; virtual void calcJacobian(Mat &jac) = 0;
// 3x3 8U mask, where 0 means don't refine respective parameter, != 0 means refine // 3x3 8U mask, where 0 means don't refine respective parameter, != 0 means refine
@ -131,8 +201,8 @@ protected:
// Threshold to filter out poorly matched image pairs // Threshold to filter out poorly matched image pairs
double conf_thresh_; double conf_thresh_;
//LevenbergMarquardt algorithm termination criteria //Levenberg-Marquardt algorithm termination criteria
CvTermCriteria term_criteria_; TermCriteria term_criteria_;
// Camera parameters matrix (CV_64F) // Camera parameters matrix (CV_64F)
Mat cam_params_; Mat cam_params_;
@ -142,36 +212,110 @@ protected:
}; };
// Minimizes reprojection error. /** @brief Stub bundle adjuster that does nothing.
// It can estimate focal length, aspect ratio, principal point. */
// You can affect only on them via the refinement mask. class CV_EXPORTS NoBundleAdjuster : public BundleAdjusterBase
{
public:
NoBundleAdjuster() : BundleAdjusterBase(0, 0) {}
private:
bool estimate(const std::vector<ImageFeatures> &, const std::vector<MatchesInfo> &,
std::vector<CameraParams> &) CV_OVERRIDE
{
return true;
}
void setUpInitialCameraParams(const std::vector<CameraParams> &) CV_OVERRIDE {}
void obtainRefinedCameraParams(std::vector<CameraParams> &) const CV_OVERRIDE {}
void calcError(Mat &) CV_OVERRIDE {}
void calcJacobian(Mat &) CV_OVERRIDE {}
};
/** @brief Implementation of the camera parameters refinement algorithm which minimizes sum of the reprojection
error squares
It can estimate focal length, aspect ratio, principal point.
You can affect only on them via the refinement mask.
*/
class CV_EXPORTS BundleAdjusterReproj : public BundleAdjusterBase class CV_EXPORTS BundleAdjusterReproj : public BundleAdjusterBase
{ {
public: public:
BundleAdjusterReproj() : BundleAdjusterBase(7, 2) {} BundleAdjusterReproj() : BundleAdjusterBase(7, 2) {}
private: private:
void setUpInitialCameraParams(const std::vector<CameraParams> &cameras); void setUpInitialCameraParams(const std::vector<CameraParams> &cameras) CV_OVERRIDE;
void obtainRefinedCameraParams(std::vector<CameraParams> &cameras) const; void obtainRefinedCameraParams(std::vector<CameraParams> &cameras) const CV_OVERRIDE;
void calcError(Mat &err); void calcError(Mat &err) CV_OVERRIDE;
void calcJacobian(Mat &jac); void calcJacobian(Mat &jac) CV_OVERRIDE;
Mat err1_, err2_; Mat err1_, err2_;
}; };
// Minimizes sun of ray-to-ray distances. /** @brief Implementation of the camera parameters refinement algorithm which minimizes sum of the distances
// It can estimate focal length. It ignores the refinement mask for now. between the rays passing through the camera center and a feature. :
It can estimate focal length. It ignores the refinement mask for now.
*/
class CV_EXPORTS BundleAdjusterRay : public BundleAdjusterBase class CV_EXPORTS BundleAdjusterRay : public BundleAdjusterBase
{ {
public: public:
BundleAdjusterRay() : BundleAdjusterBase(4, 3) {} BundleAdjusterRay() : BundleAdjusterBase(4, 3) {}
private: private:
void setUpInitialCameraParams(const std::vector<CameraParams> &cameras); void setUpInitialCameraParams(const std::vector<CameraParams> &cameras) CV_OVERRIDE;
void obtainRefinedCameraParams(std::vector<CameraParams> &cameras) const; void obtainRefinedCameraParams(std::vector<CameraParams> &cameras) const CV_OVERRIDE;
void calcError(Mat &err); void calcError(Mat &err) CV_OVERRIDE;
void calcJacobian(Mat &jac); void calcJacobian(Mat &jac) CV_OVERRIDE;
Mat err1_, err2_;
};
/** @brief Bundle adjuster that expects affine transformation
represented in homogeneous coordinates in R for each camera param. Implements
camera parameters refinement algorithm which minimizes sum of the reprojection
error squares
It estimates all transformation parameters. Refinement mask is ignored.
@sa AffineBasedEstimator AffineBestOf2NearestMatcher BundleAdjusterAffinePartial
*/
class CV_EXPORTS BundleAdjusterAffine : public BundleAdjusterBase
{
public:
BundleAdjusterAffine() : BundleAdjusterBase(6, 2) {}
private:
void setUpInitialCameraParams(const std::vector<CameraParams> &cameras) CV_OVERRIDE;
void obtainRefinedCameraParams(std::vector<CameraParams> &cameras) const CV_OVERRIDE;
void calcError(Mat &err) CV_OVERRIDE;
void calcJacobian(Mat &jac) CV_OVERRIDE;
Mat err1_, err2_;
};
/** @brief Bundle adjuster that expects affine transformation with 4 DOF
represented in homogeneous coordinates in R for each camera param. Implements
camera parameters refinement algorithm which minimizes sum of the reprojection
error squares
It estimates all transformation parameters. Refinement mask is ignored.
@sa AffineBasedEstimator AffineBestOf2NearestMatcher BundleAdjusterAffine
*/
class CV_EXPORTS BundleAdjusterAffinePartial : public BundleAdjusterBase
{
public:
BundleAdjusterAffinePartial() : BundleAdjusterBase(4, 2) {}
private:
void setUpInitialCameraParams(const std::vector<CameraParams> &cameras) CV_OVERRIDE;
void obtainRefinedCameraParams(std::vector<CameraParams> &cameras) const CV_OVERRIDE;
void calcError(Mat &err) CV_OVERRIDE;
void calcJacobian(Mat &jac) CV_OVERRIDE;
Mat err1_, err2_; Mat err1_, err2_;
}; };
@ -183,6 +327,11 @@ enum WaveCorrectKind
WAVE_CORRECT_VERT WAVE_CORRECT_VERT
}; };
/** @brief Tries to make panorama more horizontal (or vertical).
@param rmats Camera rotation matrices.
@param kind Correction kind, see detail::WaveCorrectKind.
*/
void CV_EXPORTS waveCorrect(std::vector<Mat> &rmats, WaveCorrectKind kind); void CV_EXPORTS waveCorrect(std::vector<Mat> &rmats, WaveCorrectKind kind);
@ -190,16 +339,21 @@ void CV_EXPORTS waveCorrect(std::vector<Mat> &rmats, WaveCorrectKind kind);
// Auxiliary functions // Auxiliary functions
// Returns matches graph representation in DOT language // Returns matches graph representation in DOT language
std::string CV_EXPORTS matchesGraphAsString(std::vector<std::string> &pathes, std::vector<MatchesInfo> &pairwise_matches, String CV_EXPORTS matchesGraphAsString(std::vector<String> &pathes, std::vector<MatchesInfo> &pairwise_matches,
float conf_threshold); float conf_threshold);
std::vector<int> CV_EXPORTS leaveBiggestComponent(std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches, std::vector<int> CV_EXPORTS leaveBiggestComponent(
float conf_threshold); std::vector<ImageFeatures> &features,
std::vector<MatchesInfo> &pairwise_matches,
float conf_threshold);
void CV_EXPORTS findMaxSpanningTree(int num_images, const std::vector<MatchesInfo> &pairwise_matches, void CV_EXPORTS findMaxSpanningTree(
Graph &span_tree, std::vector<int> &centers); int num_images, const std::vector<MatchesInfo> &pairwise_matches,
Graph &span_tree, std::vector<int> &centers);
//! @} stitching_rotation
} // namespace detail } // namespace detail
} // namespace cv } // namespace cv
#endif // __OPENCV_STITCHING_MOTION_ESTIMATORS_HPP__ #endif // OPENCV_STITCHING_MOTION_ESTIMATORS_HPP

View File

@ -40,56 +40,78 @@
// //
//M*/ //M*/
#ifndef __OPENCV_STITCHING_SEAM_FINDERS_HPP__ #ifndef OPENCV_STITCHING_SEAM_FINDERS_HPP
#define __OPENCV_STITCHING_SEAM_FINDERS_HPP__ #define OPENCV_STITCHING_SEAM_FINDERS_HPP
#include <set> #include <set>
#include "opencv2/core/core.hpp" #include "opencv2/core.hpp"
#include "opencv2/core/gpumat.hpp" #include "opencv2/opencv_modules.hpp"
namespace cv { namespace cv {
namespace detail { namespace detail {
//! @addtogroup stitching_seam
//! @{
/** @brief Base class for a seam estimator.
*/
class CV_EXPORTS SeamFinder class CV_EXPORTS SeamFinder
{ {
public: public:
virtual ~SeamFinder() {} virtual ~SeamFinder() {}
virtual void find(const std::vector<Mat> &src, const std::vector<Point> &corners, /** @brief Estimates seams.
std::vector<Mat> &masks) = 0;
@param src Source images
@param corners Source image top-left corners
@param masks Source image masks to update
*/
virtual void find(const std::vector<UMat> &src, const std::vector<Point> &corners,
std::vector<UMat> &masks) = 0;
}; };
/** @brief Stub seam estimator which does nothing.
*/
class CV_EXPORTS NoSeamFinder : public SeamFinder class CV_EXPORTS NoSeamFinder : public SeamFinder
{ {
public: public:
void find(const std::vector<Mat>&, const std::vector<Point>&, std::vector<Mat>&) {} void find(const std::vector<UMat>&, const std::vector<Point>&, std::vector<UMat>&) CV_OVERRIDE {}
}; };
/** @brief Base class for all pairwise seam estimators.
*/
class CV_EXPORTS PairwiseSeamFinder : public SeamFinder class CV_EXPORTS PairwiseSeamFinder : public SeamFinder
{ {
public: public:
virtual void find(const std::vector<Mat> &src, const std::vector<Point> &corners, virtual void find(const std::vector<UMat> &src, const std::vector<Point> &corners,
std::vector<Mat> &masks); std::vector<UMat> &masks) CV_OVERRIDE;
protected: protected:
void run(); void run();
/** @brief Resolves masks intersection of two specified images in the given ROI.
@param first First image index
@param second Second image index
@param roi Region of interest
*/
virtual void findInPair(size_t first, size_t second, Rect roi) = 0; virtual void findInPair(size_t first, size_t second, Rect roi) = 0;
std::vector<Mat> images_; std::vector<UMat> images_;
std::vector<Size> sizes_; std::vector<Size> sizes_;
std::vector<Point> corners_; std::vector<Point> corners_;
std::vector<Mat> masks_; std::vector<UMat> masks_;
}; };
/** @brief Voronoi diagram-based seam estimator.
*/
class CV_EXPORTS VoronoiSeamFinder : public PairwiseSeamFinder class CV_EXPORTS VoronoiSeamFinder : public PairwiseSeamFinder
{ {
public: public:
virtual void find(const std::vector<UMat> &src, const std::vector<Point> &corners,
std::vector<UMat> &masks) CV_OVERRIDE;
virtual void find(const std::vector<Size> &size, const std::vector<Point> &corners, virtual void find(const std::vector<Size> &size, const std::vector<Point> &corners,
std::vector<Mat> &masks); std::vector<UMat> &masks);
private: private:
void findInPair(size_t first, size_t second, Rect roi); void findInPair(size_t first, size_t second, Rect roi) CV_OVERRIDE;
}; };
@ -103,8 +125,8 @@ public:
CostFunction costFunction() const { return costFunc_; } CostFunction costFunction() const { return costFunc_; }
void setCostFunction(CostFunction val) { costFunc_ = val; } void setCostFunction(CostFunction val) { costFunc_ = val; }
virtual void find(const std::vector<Mat> &src, const std::vector<Point> &corners, virtual void find(const std::vector<UMat> &src, const std::vector<Point> &corners,
std::vector<Mat> &masks); std::vector<UMat> &masks) CV_OVERRIDE;
private: private:
enum ComponentState enum ComponentState
@ -154,7 +176,7 @@ private:
}; };
void process( void process(
const Mat &image1, const Mat &image2, Point tl1, Point tl2, Mat &mask1, Mat &mask2); const Mat &image1, const Mat &image2, Point tl1, Point tl2, Mat &mask1, Mat &mask2);
void findComponents(); void findComponents();
@ -201,14 +223,16 @@ private:
std::set<std::pair<int, int> > edges_; std::set<std::pair<int, int> > edges_;
}; };
/** @brief Base class for all minimum graph-cut-based seam estimators.
*/
class CV_EXPORTS GraphCutSeamFinderBase class CV_EXPORTS GraphCutSeamFinderBase
{ {
public: public:
enum { COST_COLOR, COST_COLOR_GRAD }; enum CostType { COST_COLOR, COST_COLOR_GRAD };
}; };
/** @brief Minimum graph cut-based seam estimator. See details in @cite V03 .
*/
class CV_EXPORTS GraphCutSeamFinder : public GraphCutSeamFinderBase, public SeamFinder class CV_EXPORTS GraphCutSeamFinder : public GraphCutSeamFinderBase, public SeamFinder
{ {
public: public:
@ -217,8 +241,8 @@ public:
~GraphCutSeamFinder(); ~GraphCutSeamFinder();
void find(const std::vector<Mat> &src, const std::vector<Point> &corners, void find(const std::vector<UMat> &src, const std::vector<Point> &corners,
std::vector<Mat> &masks); std::vector<UMat> &masks) CV_OVERRIDE;
private: private:
// To avoid GCGraph dependency // To avoid GCGraph dependency
@ -227,6 +251,7 @@ private:
}; };
#ifdef HAVE_OPENCV_CUDALEGACY
class CV_EXPORTS GraphCutSeamFinderGpu : public GraphCutSeamFinderBase, public PairwiseSeamFinder class CV_EXPORTS GraphCutSeamFinderGpu : public GraphCutSeamFinderBase, public PairwiseSeamFinder
{ {
public: public:
@ -235,9 +260,9 @@ public:
: cost_type_(cost_type), terminal_cost_(terminal_cost), : cost_type_(cost_type), terminal_cost_(terminal_cost),
bad_region_penalty_(bad_region_penalty) {} bad_region_penalty_(bad_region_penalty) {}
void find(const std::vector<cv::Mat> &src, const std::vector<cv::Point> &corners, void find(const std::vector<cv::UMat> &src, const std::vector<cv::Point> &corners,
std::vector<cv::Mat> &masks); std::vector<cv::UMat> &masks) CV_OVERRIDE;
void findInPair(size_t first, size_t second, Rect roi); void findInPair(size_t first, size_t second, Rect roi) CV_OVERRIDE;
private: private:
void setGraphWeightsColor(const cv::Mat &img1, const cv::Mat &img2, const cv::Mat &mask1, const cv::Mat &mask2, void setGraphWeightsColor(const cv::Mat &img1, const cv::Mat &img2, const cv::Mat &mask1, const cv::Mat &mask2,
@ -250,8 +275,11 @@ private:
float terminal_cost_; float terminal_cost_;
float bad_region_penalty_; float bad_region_penalty_;
}; };
#endif
//! @}
} // namespace detail } // namespace detail
} // namespace cv } // namespace cv
#endif // __OPENCV_STITCHING_SEAM_FINDERS_HPP__ #endif // OPENCV_STITCHING_SEAM_FINDERS_HPP

View File

@ -40,62 +40,18 @@
// //
//M*/ //M*/
#ifndef __OPENCV_STITCHING_UTIL_HPP__ #ifndef OPENCV_STITCHING_UTIL_HPP
#define __OPENCV_STITCHING_UTIL_HPP__ #define OPENCV_STITCHING_UTIL_HPP
#include <list> #include <list>
#include "opencv2/core/core.hpp" #include "opencv2/core.hpp"
#define ENABLE_LOG 0
// TODO remove LOG macros, add logging class
#if ENABLE_LOG
#ifdef ANDROID
#include <iostream>
#include <sstream>
#include <android/log.h>
#define LOG_STITCHING_MSG(msg) \
do { \
std::stringstream _os; \
_os << msg; \
__android_log_print(ANDROID_LOG_DEBUG, "STITCHING", "%s", _os.str().c_str()); \
} while(0);
#else
#include <iostream>
#define LOG_STITCHING_MSG(msg) for(;;) { std::cout << msg; std::cout.flush(); break; }
#endif
#else
#define LOG_STITCHING_MSG(msg)
#endif
#define LOG_(_level, _msg) \
for(;;) \
{ \
if ((_level) >= ::cv::detail::stitchingLogLevel()) \
{ \
LOG_STITCHING_MSG(_msg); \
} \
break; \
}
#define LOG(msg) LOG_(1, msg)
#define LOG_CHAT(msg) LOG_(0, msg)
#define LOGLN(msg) LOG(msg << std::endl)
#define LOGLN_CHAT(msg) LOG_CHAT(msg << std::endl)
//#if DEBUG_LOG_CHAT
// #define LOG_CHAT(msg) LOG(msg)
// #define LOGLN_CHAT(msg) LOGLN(msg)
//#else
// #define LOG_CHAT(msg) do{}while(0)
// #define LOGLN_CHAT(msg) do{}while(0)
//#endif
namespace cv { namespace cv {
namespace detail { namespace detail {
//! @addtogroup stitching
//! @{
class CV_EXPORTS DisjointSets class CV_EXPORTS DisjointSets
{ {
public: public:
@ -145,8 +101,9 @@ private:
// Auxiliary functions // Auxiliary functions
CV_EXPORTS bool overlapRoi(Point tl1, Point tl2, Size sz1, Size sz2, Rect &roi); CV_EXPORTS bool overlapRoi(Point tl1, Point tl2, Size sz1, Size sz2, Rect &roi);
CV_EXPORTS Rect resultRoi(const std::vector<Point> &corners, const std::vector<Mat> &images); CV_EXPORTS Rect resultRoi(const std::vector<Point> &corners, const std::vector<UMat> &images);
CV_EXPORTS Rect resultRoi(const std::vector<Point> &corners, const std::vector<Size> &sizes); CV_EXPORTS Rect resultRoi(const std::vector<Point> &corners, const std::vector<Size> &sizes);
CV_EXPORTS Rect resultRoiIntersection(const std::vector<Point> &corners, const std::vector<Size> &sizes);
CV_EXPORTS Point resultTl(const std::vector<Point> &corners); CV_EXPORTS Point resultTl(const std::vector<Point> &corners);
// Returns random 'count' element subset of the {0,1,...,size-1} set // Returns random 'count' element subset of the {0,1,...,size-1} set
@ -154,9 +111,11 @@ CV_EXPORTS void selectRandomSubset(int count, int size, std::vector<int> &subset
CV_EXPORTS int& stitchingLogLevel(); CV_EXPORTS int& stitchingLogLevel();
//! @}
} // namespace detail } // namespace detail
} // namespace cv } // namespace cv
#include "util_inl.hpp" #include "util_inl.hpp"
#endif // __OPENCV_STITCHING_UTIL_HPP__ #endif // OPENCV_STITCHING_UTIL_HPP

View File

@ -40,13 +40,15 @@
// //
//M*/ //M*/
#ifndef __OPENCV_STITCHING_UTIL_INL_HPP__ #ifndef OPENCV_STITCHING_UTIL_INL_HPP
#define __OPENCV_STITCHING_UTIL_INL_HPP__ #define OPENCV_STITCHING_UTIL_INL_HPP
#include <queue> #include <queue>
#include "opencv2/core/core.hpp" #include "opencv2/core.hpp"
#include "util.hpp" // Make your IDE see declarations #include "util.hpp" // Make your IDE see declarations
//! @cond IGNORED
namespace cv { namespace cv {
namespace detail { namespace detail {
@ -124,4 +126,6 @@ static inline double sqr(double x) { return x * x; }
} // namespace detail } // namespace detail
} // namespace cv } // namespace cv
#endif // __OPENCV_STITCHING_UTIL_INL_HPP__ //! @endcond
#endif // OPENCV_STITCHING_UTIL_INL_HPP

View File

@ -40,43 +40,92 @@
// //
//M*/ //M*/
#ifndef __OPENCV_STITCHING_WARPERS_HPP__ #ifndef OPENCV_STITCHING_WARPERS_HPP
#define __OPENCV_STITCHING_WARPERS_HPP__ #define OPENCV_STITCHING_WARPERS_HPP
#include "opencv2/core/core.hpp" #include "opencv2/core.hpp"
#include "opencv2/core/gpumat.hpp" #include "opencv2/core/cuda.hpp"
#include "opencv2/imgproc/imgproc.hpp" #include "opencv2/imgproc.hpp"
#include "opencv2/opencv_modules.hpp"
namespace cv { namespace cv {
namespace detail { namespace detail {
//! @addtogroup stitching_warp
//! @{
/** @brief Rotation-only model image warper interface.
*/
class CV_EXPORTS RotationWarper class CV_EXPORTS RotationWarper
{ {
public: public:
virtual ~RotationWarper() {} virtual ~RotationWarper() {}
virtual Point2f warpPoint(const Point2f &pt, const Mat &K, const Mat &R) = 0; /** @brief Projects the image point.
virtual Rect buildMaps(Size src_size, const Mat &K, const Mat &R, Mat &xmap, Mat &ymap) = 0; @param pt Source point
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@return Projected point
*/
virtual Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R) = 0;
virtual Point warp(const Mat &src, const Mat &K, const Mat &R, int interp_mode, int border_mode, /** @brief Builds the projection maps according to the given camera data.
Mat &dst) = 0;
virtual void warpBackward(const Mat &src, const Mat &K, const Mat &R, int interp_mode, int border_mode, @param src_size Source image size
Size dst_size, Mat &dst) = 0; @param K Camera intrinsic parameters
@param R Camera rotation matrix
@param xmap Projection map for the x axis
@param ymap Projection map for the y axis
@return Projected image minimum bounding box
*/
virtual Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) = 0;
virtual Rect warpRoi(Size src_size, const Mat &K, const Mat &R) = 0; /** @brief Projects the image.
float getScale() const { return 1.f; } @param src Source image
void setScale(float) {} @param K Camera intrinsic parameters
@param R Camera rotation matrix
@param interp_mode Interpolation mode
@param border_mode Border extrapolation mode
@param dst Projected image
@return Project image top-left corner
*/
virtual Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
OutputArray dst) = 0;
/** @brief Projects the image backward.
@param src Projected image
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@param interp_mode Interpolation mode
@param border_mode Border extrapolation mode
@param dst_size Backward-projected image size
@param dst Backward-projected image
*/
virtual void warpBackward(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
Size dst_size, OutputArray dst) = 0;
/**
@param src_size Source image bounding box
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@return Projected image minimum bounding box
*/
virtual Rect warpRoi(Size src_size, InputArray K, InputArray R) = 0;
virtual float getScale() const { return 1.f; }
virtual void setScale(float) {}
}; };
/** @brief Base class for warping logic implementation.
*/
struct CV_EXPORTS ProjectorBase struct CV_EXPORTS ProjectorBase
{ {
void setCameraParams(const Mat &K = Mat::eye(3, 3, CV_32F), void setCameraParams(InputArray K = Mat::eye(3, 3, CV_32F),
const Mat &R = Mat::eye(3, 3, CV_32F), InputArray R = Mat::eye(3, 3, CV_32F),
const Mat &T = Mat::zeros(3, 1, CV_32F)); InputArray T = Mat::zeros(3, 1, CV_32F));
float scale; float scale;
float k[9]; float k[9];
@ -86,25 +135,26 @@ struct CV_EXPORTS ProjectorBase
float t[3]; float t[3];
}; };
/** @brief Base class for rotation-based warper using a detail::ProjectorBase_ derived class.
*/
template <class P> template <class P>
class CV_EXPORTS RotationWarperBase : public RotationWarper class CV_EXPORTS_TEMPLATE RotationWarperBase : public RotationWarper
{ {
public: public:
Point2f warpPoint(const Point2f &pt, const Mat &K, const Mat &R); Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R) CV_OVERRIDE;
Rect buildMaps(Size src_size, const Mat &K, const Mat &R, Mat &xmap, Mat &ymap); Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) CV_OVERRIDE;
Point warp(const Mat &src, const Mat &K, const Mat &R, int interp_mode, int border_mode, Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
Mat &dst); OutputArray dst) CV_OVERRIDE;
void warpBackward(const Mat &src, const Mat &K, const Mat &R, int interp_mode, int border_mode, void warpBackward(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
Size dst_size, Mat &dst); Size dst_size, OutputArray dst) CV_OVERRIDE;
Rect warpRoi(Size src_size, const Mat &K, const Mat &R); Rect warpRoi(Size src_size, InputArray K, InputArray R) CV_OVERRIDE;
float getScale() const { return projector_.scale; } float getScale() const CV_OVERRIDE{ return projector_.scale; }
void setScale(float val) { projector_.scale = val; } void setScale(float val) CV_OVERRIDE { projector_.scale = val; }
protected: protected:
@ -125,25 +175,61 @@ struct CV_EXPORTS PlaneProjector : ProjectorBase
void mapBackward(float u, float v, float &x, float &y); void mapBackward(float u, float v, float &x, float &y);
}; };
/** @brief Warper that maps an image onto the z = 1 plane.
*/
class CV_EXPORTS PlaneWarper : public RotationWarperBase<PlaneProjector> class CV_EXPORTS PlaneWarper : public RotationWarperBase<PlaneProjector>
{ {
public: public:
/** @brief Construct an instance of the plane warper class.
@param scale Projected image scale multiplier
*/
PlaneWarper(float scale = 1.f) { projector_.scale = scale; } PlaneWarper(float scale = 1.f) { projector_.scale = scale; }
void setScale(float scale) { projector_.scale = scale; } Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R) CV_OVERRIDE;
Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R, InputArray T);
Point2f warpPoint(const Point2f &pt, const Mat &K, const Mat &R, const Mat &T); virtual Rect buildMaps(Size src_size, InputArray K, InputArray R, InputArray T, OutputArray xmap, OutputArray ymap);
Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) CV_OVERRIDE;
Rect buildMaps(Size src_size, const Mat &K, const Mat &R, const Mat &T, Mat &xmap, Mat &ymap); Point warp(InputArray src, InputArray K, InputArray R,
int interp_mode, int border_mode, OutputArray dst) CV_OVERRIDE;
virtual Point warp(InputArray src, InputArray K, InputArray R, InputArray T, int interp_mode, int border_mode,
OutputArray dst);
Point warp(const Mat &src, const Mat &K, const Mat &R, const Mat &T, int interp_mode, int border_mode, Rect warpRoi(Size src_size, InputArray K, InputArray R) CV_OVERRIDE;
Mat &dst); Rect warpRoi(Size src_size, InputArray K, InputArray R, InputArray T);
Rect warpRoi(Size src_size, const Mat &K, const Mat &R, const Mat &T);
protected: protected:
void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br); void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br) CV_OVERRIDE;
};
/** @brief Affine warper that uses rotations and translations
Uses affine transformation in homogeneous coordinates to represent both rotation and
translation in camera rotation matrix.
*/
class CV_EXPORTS AffineWarper : public PlaneWarper
{
public:
/** @brief Construct an instance of the affine warper class.
@param scale Projected image scale multiplier
*/
AffineWarper(float scale = 1.f) : PlaneWarper(scale) {}
Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R) CV_OVERRIDE;
Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) CV_OVERRIDE;
Point warp(InputArray src, InputArray K, InputArray R,
int interp_mode, int border_mode, OutputArray dst) CV_OVERRIDE;
Rect warpRoi(Size src_size, InputArray K, InputArray R) CV_OVERRIDE;
protected:
/** @brief Extracts rotation and translation matrices from matrix H representing
affine transformation in homogeneous coordinates
*/
void getRTfromHomogeneous(InputArray H, Mat &R, Mat &T);
}; };
@ -154,15 +240,26 @@ struct CV_EXPORTS SphericalProjector : ProjectorBase
}; };
// Projects image onto unit sphere with origin at (0, 0, 0). /** @brief Warper that maps an image onto the unit sphere located at the origin.
// Poles are located at (0, -1, 0) and (0, 1, 0) points.
Projects image onto unit sphere with origin at (0, 0, 0) and radius scale, measured in pixels.
A 360 panorama would therefore have a resulting width of 2 * scale * PI pixels.
Poles are located at (0, -1, 0) and (0, 1, 0) points.
*/
class CV_EXPORTS SphericalWarper : public RotationWarperBase<SphericalProjector> class CV_EXPORTS SphericalWarper : public RotationWarperBase<SphericalProjector>
{ {
public: public:
/** @brief Construct an instance of the spherical warper class.
@param scale Radius of the projected sphere, in pixels. An image spanning the
whole sphere will have a width of 2 * scale * PI pixels.
*/
SphericalWarper(float scale) { projector_.scale = scale; } SphericalWarper(float scale) { projector_.scale = scale; }
Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) CV_OVERRIDE;
Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, OutputArray dst) CV_OVERRIDE;
protected: protected:
void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br); void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br) CV_OVERRIDE;
}; };
@ -173,14 +270,21 @@ struct CV_EXPORTS CylindricalProjector : ProjectorBase
}; };
// Projects image onto x * x + z * z = 1 cylinder /** @brief Warper that maps an image onto the x\*x + z\*z = 1 cylinder.
*/
class CV_EXPORTS CylindricalWarper : public RotationWarperBase<CylindricalProjector> class CV_EXPORTS CylindricalWarper : public RotationWarperBase<CylindricalProjector>
{ {
public: public:
/** @brief Construct an instance of the cylindrical warper class.
@param scale Projected image scale multiplier
*/
CylindricalWarper(float scale) { projector_.scale = scale; } CylindricalWarper(float scale) { projector_.scale = scale; }
Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) CV_OVERRIDE;
Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, OutputArray dst) CV_OVERRIDE;
protected: protected:
void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br) void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br) CV_OVERRIDE
{ {
RotationWarperBase<CylindricalProjector>::detectResultRoiByBorder(src_size, dst_tl, dst_br); RotationWarperBase<CylindricalProjector>::detectResultRoiByBorder(src_size, dst_tl, dst_br);
} }
@ -333,7 +437,7 @@ class CV_EXPORTS PlaneWarperGpu : public PlaneWarper
public: public:
PlaneWarperGpu(float scale = 1.f) : PlaneWarper(scale) {} PlaneWarperGpu(float scale = 1.f) : PlaneWarper(scale) {}
Rect buildMaps(Size src_size, const Mat &K, const Mat &R, Mat &xmap, Mat &ymap) Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) CV_OVERRIDE
{ {
Rect result = buildMaps(src_size, K, R, d_xmap_, d_ymap_); Rect result = buildMaps(src_size, K, R, d_xmap_, d_ymap_);
d_xmap_.download(xmap); d_xmap_.download(xmap);
@ -341,7 +445,7 @@ public:
return result; return result;
} }
Rect buildMaps(Size src_size, const Mat &K, const Mat &R, const Mat &T, Mat &xmap, Mat &ymap) Rect buildMaps(Size src_size, InputArray K, InputArray R, InputArray T, OutputArray xmap, OutputArray ymap) CV_OVERRIDE
{ {
Rect result = buildMaps(src_size, K, R, T, d_xmap_, d_ymap_); Rect result = buildMaps(src_size, K, R, T, d_xmap_, d_ymap_);
d_xmap_.download(xmap); d_xmap_.download(xmap);
@ -349,8 +453,8 @@ public:
return result; return result;
} }
Point warp(const Mat &src, const Mat &K, const Mat &R, int interp_mode, int border_mode, Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
Mat &dst) OutputArray dst) CV_OVERRIDE
{ {
d_src_.upload(src); d_src_.upload(src);
Point result = warp(d_src_, K, R, interp_mode, border_mode, d_dst_); Point result = warp(d_src_, K, R, interp_mode, border_mode, d_dst_);
@ -358,8 +462,8 @@ public:
return result; return result;
} }
Point warp(const Mat &src, const Mat &K, const Mat &R, const Mat &T, int interp_mode, int border_mode, Point warp(InputArray src, InputArray K, InputArray R, InputArray T, int interp_mode, int border_mode,
Mat &dst) OutputArray dst) CV_OVERRIDE
{ {
d_src_.upload(src); d_src_.upload(src);
Point result = warp(d_src_, K, R, T, interp_mode, border_mode, d_dst_); Point result = warp(d_src_, K, R, T, interp_mode, border_mode, d_dst_);
@ -367,18 +471,18 @@ public:
return result; return result;
} }
Rect buildMaps(Size src_size, const Mat &K, const Mat &R, gpu::GpuMat &xmap, gpu::GpuMat &ymap); Rect buildMaps(Size src_size, InputArray K, InputArray R, cuda::GpuMat & xmap, cuda::GpuMat & ymap);
Rect buildMaps(Size src_size, const Mat &K, const Mat &R, const Mat &T, gpu::GpuMat &xmap, gpu::GpuMat &ymap); Rect buildMaps(Size src_size, InputArray K, InputArray R, InputArray T, cuda::GpuMat & xmap, cuda::GpuMat & ymap);
Point warp(const gpu::GpuMat &src, const Mat &K, const Mat &R, int interp_mode, int border_mode, Point warp(const cuda::GpuMat & src, InputArray K, InputArray R, int interp_mode, int border_mode,
gpu::GpuMat &dst); cuda::GpuMat & dst);
Point warp(const gpu::GpuMat &src, const Mat &K, const Mat &R, const Mat &T, int interp_mode, int border_mode, Point warp(const cuda::GpuMat & src, InputArray K, InputArray R, InputArray T, int interp_mode, int border_mode,
gpu::GpuMat &dst); cuda::GpuMat & dst);
private: private:
gpu::GpuMat d_xmap_, d_ymap_, d_src_, d_dst_; cuda::GpuMat d_xmap_, d_ymap_, d_src_, d_dst_;
}; };
@ -387,7 +491,7 @@ class CV_EXPORTS SphericalWarperGpu : public SphericalWarper
public: public:
SphericalWarperGpu(float scale) : SphericalWarper(scale) {} SphericalWarperGpu(float scale) : SphericalWarper(scale) {}
Rect buildMaps(Size src_size, const Mat &K, const Mat &R, Mat &xmap, Mat &ymap) Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) CV_OVERRIDE
{ {
Rect result = buildMaps(src_size, K, R, d_xmap_, d_ymap_); Rect result = buildMaps(src_size, K, R, d_xmap_, d_ymap_);
d_xmap_.download(xmap); d_xmap_.download(xmap);
@ -395,8 +499,8 @@ public:
return result; return result;
} }
Point warp(const Mat &src, const Mat &K, const Mat &R, int interp_mode, int border_mode, Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
Mat &dst) OutputArray dst) CV_OVERRIDE
{ {
d_src_.upload(src); d_src_.upload(src);
Point result = warp(d_src_, K, R, interp_mode, border_mode, d_dst_); Point result = warp(d_src_, K, R, interp_mode, border_mode, d_dst_);
@ -404,13 +508,13 @@ public:
return result; return result;
} }
Rect buildMaps(Size src_size, const Mat &K, const Mat &R, gpu::GpuMat &xmap, gpu::GpuMat &ymap); Rect buildMaps(Size src_size, InputArray K, InputArray R, cuda::GpuMat & xmap, cuda::GpuMat & ymap);
Point warp(const gpu::GpuMat &src, const Mat &K, const Mat &R, int interp_mode, int border_mode, Point warp(const cuda::GpuMat & src, InputArray K, InputArray R, int interp_mode, int border_mode,
gpu::GpuMat &dst); cuda::GpuMat & dst);
private: private:
gpu::GpuMat d_xmap_, d_ymap_, d_src_, d_dst_; cuda::GpuMat d_xmap_, d_ymap_, d_src_, d_dst_;
}; };
@ -419,7 +523,7 @@ class CV_EXPORTS CylindricalWarperGpu : public CylindricalWarper
public: public:
CylindricalWarperGpu(float scale) : CylindricalWarper(scale) {} CylindricalWarperGpu(float scale) : CylindricalWarper(scale) {}
Rect buildMaps(Size src_size, const Mat &K, const Mat &R, Mat &xmap, Mat &ymap) Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) CV_OVERRIDE
{ {
Rect result = buildMaps(src_size, K, R, d_xmap_, d_ymap_); Rect result = buildMaps(src_size, K, R, d_xmap_, d_ymap_);
d_xmap_.download(xmap); d_xmap_.download(xmap);
@ -427,8 +531,8 @@ public:
return result; return result;
} }
Point warp(const Mat &src, const Mat &K, const Mat &R, int interp_mode, int border_mode, Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
Mat &dst) OutputArray dst) CV_OVERRIDE
{ {
d_src_.upload(src); d_src_.upload(src);
Point result = warp(d_src_, K, R, interp_mode, border_mode, d_dst_); Point result = warp(d_src_, K, R, interp_mode, border_mode, d_dst_);
@ -436,17 +540,17 @@ public:
return result; return result;
} }
Rect buildMaps(Size src_size, const Mat &K, const Mat &R, gpu::GpuMat &xmap, gpu::GpuMat &ymap); Rect buildMaps(Size src_size, InputArray K, InputArray R, cuda::GpuMat & xmap, cuda::GpuMat & ymap);
Point warp(const gpu::GpuMat &src, const Mat &K, const Mat &R, int interp_mode, int border_mode, Point warp(const cuda::GpuMat & src, InputArray K, InputArray R, int interp_mode, int border_mode,
gpu::GpuMat &dst); cuda::GpuMat & dst);
private: private:
gpu::GpuMat d_xmap_, d_ymap_, d_src_, d_dst_; cuda::GpuMat d_xmap_, d_ymap_, d_src_, d_dst_;
}; };
struct SphericalPortraitProjector : ProjectorBase struct CV_EXPORTS SphericalPortraitProjector : ProjectorBase
{ {
void mapForward(float x, float y, float &u, float &v); void mapForward(float x, float y, float &u, float &v);
void mapBackward(float u, float v, float &x, float &y); void mapBackward(float u, float v, float &x, float &y);
@ -461,10 +565,10 @@ public:
SphericalPortraitWarper(float scale) { projector_.scale = scale; } SphericalPortraitWarper(float scale) { projector_.scale = scale; }
protected: protected:
void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br); void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br) CV_OVERRIDE;
}; };
struct CylindricalPortraitProjector : ProjectorBase struct CV_EXPORTS CylindricalPortraitProjector : ProjectorBase
{ {
void mapForward(float x, float y, float &u, float &v); void mapForward(float x, float y, float &u, float &v);
void mapBackward(float u, float v, float &x, float &y); void mapBackward(float u, float v, float &x, float &y);
@ -477,13 +581,13 @@ public:
CylindricalPortraitWarper(float scale) { projector_.scale = scale; } CylindricalPortraitWarper(float scale) { projector_.scale = scale; }
protected: protected:
void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br) void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br) CV_OVERRIDE
{ {
RotationWarperBase<CylindricalPortraitProjector>::detectResultRoiByBorder(src_size, dst_tl, dst_br); RotationWarperBase<CylindricalPortraitProjector>::detectResultRoiByBorder(src_size, dst_tl, dst_br);
} }
}; };
struct PlanePortraitProjector : ProjectorBase struct CV_EXPORTS PlanePortraitProjector : ProjectorBase
{ {
void mapForward(float x, float y, float &u, float &v); void mapForward(float x, float y, float &u, float &v);
void mapBackward(float u, float v, float &x, float &y); void mapBackward(float u, float v, float &x, float &y);
@ -496,15 +600,17 @@ public:
PlanePortraitWarper(float scale) { projector_.scale = scale; } PlanePortraitWarper(float scale) { projector_.scale = scale; }
protected: protected:
void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br) void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br) CV_OVERRIDE
{ {
RotationWarperBase<PlanePortraitProjector>::detectResultRoiByBorder(src_size, dst_tl, dst_br); RotationWarperBase<PlanePortraitProjector>::detectResultRoiByBorder(src_size, dst_tl, dst_br);
} }
}; };
//! @} stitching_warp
} // namespace detail } // namespace detail
} // namespace cv } // namespace cv
#include "warpers_inl.hpp" #include "warpers_inl.hpp"
#endif // __OPENCV_STITCHING_WARPERS_HPP__ #endif // OPENCV_STITCHING_WARPERS_HPP

View File

@ -40,17 +40,20 @@
// //
//M*/ //M*/
#ifndef __OPENCV_STITCHING_WARPERS_INL_HPP__ #ifndef OPENCV_STITCHING_WARPERS_INL_HPP
#define __OPENCV_STITCHING_WARPERS_INL_HPP__ #define OPENCV_STITCHING_WARPERS_INL_HPP
#include "opencv2/core/core.hpp" #include "opencv2/core.hpp"
#include "warpers.hpp" // Make your IDE see declarations #include "warpers.hpp" // Make your IDE see declarations
#include <limits>
//! @cond IGNORED
namespace cv { namespace cv {
namespace detail { namespace detail {
template <class P> template <class P>
Point2f RotationWarperBase<P>::warpPoint(const Point2f &pt, const Mat &K, const Mat &R) Point2f RotationWarperBase<P>::warpPoint(const Point2f &pt, InputArray K, InputArray R)
{ {
projector_.setCameraParams(K, R); projector_.setCameraParams(K, R);
Point2f uv; Point2f uv;
@ -60,15 +63,17 @@ Point2f RotationWarperBase<P>::warpPoint(const Point2f &pt, const Mat &K, const
template <class P> template <class P>
Rect RotationWarperBase<P>::buildMaps(Size src_size, const Mat &K, const Mat &R, Mat &xmap, Mat &ymap) Rect RotationWarperBase<P>::buildMaps(Size src_size, InputArray K, InputArray R, OutputArray _xmap, OutputArray _ymap)
{ {
projector_.setCameraParams(K, R); projector_.setCameraParams(K, R);
Point dst_tl, dst_br; Point dst_tl, dst_br;
detectResultRoi(src_size, dst_tl, dst_br); detectResultRoi(src_size, dst_tl, dst_br);
xmap.create(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, CV_32F); _xmap.create(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, CV_32F);
ymap.create(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, CV_32F); _ymap.create(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, CV_32F);
Mat xmap = _xmap.getMat(), ymap = _ymap.getMat();
float x, y; float x, y;
for (int v = dst_tl.y; v <= dst_br.y; ++v) for (int v = dst_tl.y; v <= dst_br.y; ++v)
@ -86,10 +91,10 @@ Rect RotationWarperBase<P>::buildMaps(Size src_size, const Mat &K, const Mat &R,
template <class P> template <class P>
Point RotationWarperBase<P>::warp(const Mat &src, const Mat &K, const Mat &R, int interp_mode, int border_mode, Point RotationWarperBase<P>::warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
Mat &dst) OutputArray dst)
{ {
Mat xmap, ymap; UMat xmap, ymap;
Rect dst_roi = buildMaps(src.size(), K, R, xmap, ymap); Rect dst_roi = buildMaps(src.size(), K, R, xmap, ymap);
dst.create(dst_roi.height + 1, dst_roi.width + 1, src.type()); dst.create(dst_roi.height + 1, dst_roi.width + 1, src.type());
@ -100,14 +105,16 @@ Point RotationWarperBase<P>::warp(const Mat &src, const Mat &K, const Mat &R, in
template <class P> template <class P>
void RotationWarperBase<P>::warpBackward(const Mat &src, const Mat &K, const Mat &R, int interp_mode, int border_mode, void RotationWarperBase<P>::warpBackward(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
Size dst_size, Mat &dst) Size dst_size, OutputArray dst)
{ {
projector_.setCameraParams(K, R); projector_.setCameraParams(K, R);
Point src_tl, src_br; Point src_tl, src_br;
detectResultRoi(dst_size, src_tl, src_br); detectResultRoi(dst_size, src_tl, src_br);
CV_Assert(src_br.x - src_tl.x + 1 == src.cols && src_br.y - src_tl.y + 1 == src.rows);
Size size = src.size();
CV_Assert(src_br.x - src_tl.x + 1 == size.width && src_br.y - src_tl.y + 1 == size.height);
Mat xmap(dst_size, CV_32F); Mat xmap(dst_size, CV_32F);
Mat ymap(dst_size, CV_32F); Mat ymap(dst_size, CV_32F);
@ -129,7 +136,7 @@ void RotationWarperBase<P>::warpBackward(const Mat &src, const Mat &K, const Mat
template <class P> template <class P>
Rect RotationWarperBase<P>::warpRoi(Size src_size, const Mat &K, const Mat &R) Rect RotationWarperBase<P>::warpRoi(Size src_size, InputArray K, InputArray R)
{ {
projector_.setCameraParams(K, R); projector_.setCameraParams(K, R);
@ -143,10 +150,10 @@ Rect RotationWarperBase<P>::warpRoi(Size src_size, const Mat &K, const Mat &R)
template <class P> template <class P>
void RotationWarperBase<P>::detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br) void RotationWarperBase<P>::detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br)
{ {
float tl_uf = std::numeric_limits<float>::max(); float tl_uf = (std::numeric_limits<float>::max)();
float tl_vf = std::numeric_limits<float>::max(); float tl_vf = (std::numeric_limits<float>::max)();
float br_uf = -std::numeric_limits<float>::max(); float br_uf = -(std::numeric_limits<float>::max)();
float br_vf = -std::numeric_limits<float>::max(); float br_vf = -(std::numeric_limits<float>::max)();
float u, v; float u, v;
for (int y = 0; y < src_size.height; ++y) for (int y = 0; y < src_size.height; ++y)
@ -154,8 +161,8 @@ void RotationWarperBase<P>::detectResultRoi(Size src_size, Point &dst_tl, Point
for (int x = 0; x < src_size.width; ++x) for (int x = 0; x < src_size.width; ++x)
{ {
projector_.mapForward(static_cast<float>(x), static_cast<float>(y), u, v); projector_.mapForward(static_cast<float>(x), static_cast<float>(y), u, v);
tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v); tl_uf = (std::min)(tl_uf, u); tl_vf = (std::min)(tl_vf, v);
br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v); br_uf = (std::max)(br_uf, u); br_vf = (std::max)(br_vf, v);
} }
} }
@ -169,31 +176,31 @@ void RotationWarperBase<P>::detectResultRoi(Size src_size, Point &dst_tl, Point
template <class P> template <class P>
void RotationWarperBase<P>::detectResultRoiByBorder(Size src_size, Point &dst_tl, Point &dst_br) void RotationWarperBase<P>::detectResultRoiByBorder(Size src_size, Point &dst_tl, Point &dst_br)
{ {
float tl_uf = std::numeric_limits<float>::max(); float tl_uf = (std::numeric_limits<float>::max)();
float tl_vf = std::numeric_limits<float>::max(); float tl_vf = (std::numeric_limits<float>::max)();
float br_uf = -std::numeric_limits<float>::max(); float br_uf = -(std::numeric_limits<float>::max)();
float br_vf = -std::numeric_limits<float>::max(); float br_vf = -(std::numeric_limits<float>::max)();
float u, v; float u, v;
for (float x = 0; x < src_size.width; ++x) for (float x = 0; x < src_size.width; ++x)
{ {
projector_.mapForward(static_cast<float>(x), 0, u, v); projector_.mapForward(static_cast<float>(x), 0, u, v);
tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v); tl_uf = (std::min)(tl_uf, u); tl_vf = (std::min)(tl_vf, v);
br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v); br_uf = (std::max)(br_uf, u); br_vf = (std::max)(br_vf, v);
projector_.mapForward(static_cast<float>(x), static_cast<float>(src_size.height - 1), u, v); projector_.mapForward(static_cast<float>(x), static_cast<float>(src_size.height - 1), u, v);
tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v); tl_uf = (std::min)(tl_uf, u); tl_vf = (std::min)(tl_vf, v);
br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v); br_uf = (std::max)(br_uf, u); br_vf = (std::max)(br_vf, v);
} }
for (int y = 0; y < src_size.height; ++y) for (int y = 0; y < src_size.height; ++y)
{ {
projector_.mapForward(0, static_cast<float>(y), u, v); projector_.mapForward(0, static_cast<float>(y), u, v);
tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v); tl_uf = (std::min)(tl_uf, u); tl_vf = (std::min)(tl_vf, v);
br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v); br_uf = (std::max)(br_uf, u); br_vf = (std::max)(br_vf, v);
projector_.mapForward(static_cast<float>(src_size.width - 1), static_cast<float>(y), u, v); projector_.mapForward(static_cast<float>(src_size.width - 1), static_cast<float>(y), u, v);
tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v); tl_uf = (std::min)(tl_uf, u); tl_vf = (std::min)(tl_vf, v);
br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v); br_uf = (std::max)(br_uf, u); br_vf = (std::max)(br_vf, v);
} }
dst_tl.x = static_cast<int>(tl_uf); dst_tl.x = static_cast<int>(tl_uf);
@ -762,4 +769,6 @@ void PlanePortraitProjector::mapBackward(float u0, float v0, float &x, float &y)
} // namespace detail } // namespace detail
} // namespace cv } // namespace cv
#endif // __OPENCV_STITCHING_WARPERS_INL_HPP__ //! @endcond
#endif // OPENCV_STITCHING_WARPERS_INL_HPP

View File

@ -40,13 +40,18 @@
// //
//M*/ //M*/
#ifndef __OPENCV_STITCHING_WARPER_CREATORS_HPP__ #ifndef OPENCV_STITCHING_WARPER_CREATORS_HPP
#define __OPENCV_STITCHING_WARPER_CREATORS_HPP__ #define OPENCV_STITCHING_WARPER_CREATORS_HPP
#include "opencv2/stitching/detail/warpers.hpp" #include "opencv2/stitching/detail/warpers.hpp"
namespace cv { namespace cv {
//! @addtogroup stitching_warp
//! @{
/** @brief Image warper factories base class.
*/
class WarperCreator class WarperCreator
{ {
public: public:
@ -54,37 +59,50 @@ public:
virtual Ptr<detail::RotationWarper> create(float scale) const = 0; virtual Ptr<detail::RotationWarper> create(float scale) const = 0;
}; };
/** @brief Plane warper factory class.
@sa detail::PlaneWarper
*/
class PlaneWarper : public WarperCreator class PlaneWarper : public WarperCreator
{ {
public: public:
Ptr<detail::RotationWarper> create(float scale) const { return new detail::PlaneWarper(scale); } Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::PlaneWarper>(scale); }
}; };
/** @brief Affine warper factory class.
@sa detail::AffineWarper
*/
class AffineWarper : public WarperCreator
{
public:
Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::AffineWarper>(scale); }
};
/** @brief Cylindrical warper factory class.
@sa detail::CylindricalWarper
*/
class CylindricalWarper: public WarperCreator class CylindricalWarper: public WarperCreator
{ {
public: public:
Ptr<detail::RotationWarper> create(float scale) const { return new detail::CylindricalWarper(scale); } Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::CylindricalWarper>(scale); }
}; };
/** @brief Spherical warper factory class */
class SphericalWarper: public WarperCreator class SphericalWarper: public WarperCreator
{ {
public: public:
Ptr<detail::RotationWarper> create(float scale) const { return new detail::SphericalWarper(scale); } Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::SphericalWarper>(scale); }
}; };
class FisheyeWarper : public WarperCreator class FisheyeWarper : public WarperCreator
{ {
public: public:
Ptr<detail::RotationWarper> create(float scale) const { return new detail::FisheyeWarper(scale); } Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::FisheyeWarper>(scale); }
}; };
class StereographicWarper: public WarperCreator class StereographicWarper: public WarperCreator
{ {
public: public:
Ptr<detail::RotationWarper> create(float scale) const { return new detail::StereographicWarper(scale); } Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::StereographicWarper>(scale); }
}; };
class CompressedRectilinearWarper: public WarperCreator class CompressedRectilinearWarper: public WarperCreator
@ -95,7 +113,7 @@ public:
{ {
a = A; b = B; a = A; b = B;
} }
Ptr<detail::RotationWarper> create(float scale) const { return new detail::CompressedRectilinearWarper(scale, a, b); } Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::CompressedRectilinearWarper>(scale, a, b); }
}; };
class CompressedRectilinearPortraitWarper: public WarperCreator class CompressedRectilinearPortraitWarper: public WarperCreator
@ -106,7 +124,7 @@ public:
{ {
a = A; b = B; a = A; b = B;
} }
Ptr<detail::RotationWarper> create(float scale) const { return new detail::CompressedRectilinearPortraitWarper(scale, a, b); } Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::CompressedRectilinearPortraitWarper>(scale, a, b); }
}; };
class PaniniWarper: public WarperCreator class PaniniWarper: public WarperCreator
@ -117,7 +135,7 @@ public:
{ {
a = A; b = B; a = A; b = B;
} }
Ptr<detail::RotationWarper> create(float scale) const { return new detail::PaniniWarper(scale, a, b); } Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::PaniniWarper>(scale, a, b); }
}; };
class PaniniPortraitWarper: public WarperCreator class PaniniPortraitWarper: public WarperCreator
@ -128,43 +146,47 @@ public:
{ {
a = A; b = B; a = A; b = B;
} }
Ptr<detail::RotationWarper> create(float scale) const { return new detail::PaniniPortraitWarper(scale, a, b); } Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::PaniniPortraitWarper>(scale, a, b); }
}; };
class MercatorWarper: public WarperCreator class MercatorWarper: public WarperCreator
{ {
public: public:
Ptr<detail::RotationWarper> create(float scale) const { return new detail::MercatorWarper(scale); } Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::MercatorWarper>(scale); }
}; };
class TransverseMercatorWarper: public WarperCreator class TransverseMercatorWarper: public WarperCreator
{ {
public: public:
Ptr<detail::RotationWarper> create(float scale) const { return new detail::TransverseMercatorWarper(scale); } Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::TransverseMercatorWarper>(scale); }
}; };
#ifdef HAVE_OPENCV_CUDAWARPING
class PlaneWarperGpu: public WarperCreator class PlaneWarperGpu: public WarperCreator
{ {
public: public:
Ptr<detail::RotationWarper> create(float scale) const { return new detail::PlaneWarperGpu(scale); } Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::PlaneWarperGpu>(scale); }
}; };
class CylindricalWarperGpu: public WarperCreator class CylindricalWarperGpu: public WarperCreator
{ {
public: public:
Ptr<detail::RotationWarper> create(float scale) const { return new detail::CylindricalWarperGpu(scale); } Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::CylindricalWarperGpu>(scale); }
}; };
class SphericalWarperGpu: public WarperCreator class SphericalWarperGpu: public WarperCreator
{ {
public: public:
Ptr<detail::RotationWarper> create(float scale) const { return new detail::SphericalWarperGpu(scale); } Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::SphericalWarperGpu>(scale); }
}; };
#endif
//! @} stitching_warp
} // namespace cv } // namespace cv
#endif // __OPENCV_STITCHING_WARPER_CREATORS_HPP__ #endif // OPENCV_STITCHING_WARPER_CREATORS_HPP

View File

@ -40,15 +40,19 @@
// //
//M*/ //M*/
#ifndef __OPENCV_SUPERRES_OPTICAL_FLOW_HPP__ #ifndef OPENCV_SUPERRES_OPTICAL_FLOW_HPP
#define __OPENCV_SUPERRES_OPTICAL_FLOW_HPP__ #define OPENCV_SUPERRES_OPTICAL_FLOW_HPP
#include "opencv2/core/core.hpp" #include "opencv2/core.hpp"
namespace cv namespace cv
{ {
namespace superres namespace superres
{ {
//! @addtogroup superres
//! @{
class CV_EXPORTS DenseOpticalFlowExt : public cv::Algorithm class CV_EXPORTS DenseOpticalFlowExt : public cv::Algorithm
{ {
public: public:
@ -56,21 +60,144 @@ namespace cv
virtual void collectGarbage() = 0; virtual void collectGarbage() = 0;
}; };
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Farneback();
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Farneback_GPU();
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Farneback_OCL();
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Simple(); class CV_EXPORTS FarnebackOpticalFlow : public virtual DenseOpticalFlowExt
{
public:
/** @see setPyrScale */
virtual double getPyrScale() const = 0;
/** @copybrief getPyrScale @see getPyrScale */
virtual void setPyrScale(double val) = 0;
/** @see setLevelsNumber */
virtual int getLevelsNumber() const = 0;
/** @copybrief getLevelsNumber @see getLevelsNumber */
virtual void setLevelsNumber(int val) = 0;
/** @see setWindowSize */
virtual int getWindowSize() const = 0;
/** @copybrief getWindowSize @see getWindowSize */
virtual void setWindowSize(int val) = 0;
/** @see setIterations */
virtual int getIterations() const = 0;
/** @copybrief getIterations @see getIterations */
virtual void setIterations(int val) = 0;
/** @see setPolyN */
virtual int getPolyN() const = 0;
/** @copybrief getPolyN @see getPolyN */
virtual void setPolyN(int val) = 0;
/** @see setPolySigma */
virtual double getPolySigma() const = 0;
/** @copybrief getPolySigma @see getPolySigma */
virtual void setPolySigma(double val) = 0;
/** @see setFlags */
virtual int getFlags() const = 0;
/** @copybrief getFlags @see getFlags */
virtual void setFlags(int val) = 0;
};
CV_EXPORTS Ptr<FarnebackOpticalFlow> createOptFlow_Farneback();
CV_EXPORTS Ptr<FarnebackOpticalFlow> createOptFlow_Farneback_CUDA();
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_DualTVL1();
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_DualTVL1_GPU();
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_DualTVL1_OCL();
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Brox_GPU(); // CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Simple();
class CV_EXPORTS DualTVL1OpticalFlow : public virtual DenseOpticalFlowExt
{
public:
/** @see setTau */
virtual double getTau() const = 0;
/** @copybrief getTau @see getTau */
virtual void setTau(double val) = 0;
/** @see setLambda */
virtual double getLambda() const = 0;
/** @copybrief getLambda @see getLambda */
virtual void setLambda(double val) = 0;
/** @see setTheta */
virtual double getTheta() const = 0;
/** @copybrief getTheta @see getTheta */
virtual void setTheta(double val) = 0;
/** @see setScalesNumber */
virtual int getScalesNumber() const = 0;
/** @copybrief getScalesNumber @see getScalesNumber */
virtual void setScalesNumber(int val) = 0;
/** @see setWarpingsNumber */
virtual int getWarpingsNumber() const = 0;
/** @copybrief getWarpingsNumber @see getWarpingsNumber */
virtual void setWarpingsNumber(int val) = 0;
/** @see setEpsilon */
virtual double getEpsilon() const = 0;
/** @copybrief getEpsilon @see getEpsilon */
virtual void setEpsilon(double val) = 0;
/** @see setIterations */
virtual int getIterations() const = 0;
/** @copybrief getIterations @see getIterations */
virtual void setIterations(int val) = 0;
/** @see setUseInitialFlow */
virtual bool getUseInitialFlow() const = 0;
/** @copybrief getUseInitialFlow @see getUseInitialFlow */
virtual void setUseInitialFlow(bool val) = 0;
};
CV_EXPORTS Ptr<DualTVL1OpticalFlow> createOptFlow_DualTVL1();
CV_EXPORTS Ptr<DualTVL1OpticalFlow> createOptFlow_DualTVL1_CUDA();
class CV_EXPORTS BroxOpticalFlow : public virtual DenseOpticalFlowExt
{
public:
//! @brief Flow smoothness
/** @see setAlpha */
virtual double getAlpha() const = 0;
/** @copybrief getAlpha @see getAlpha */
virtual void setAlpha(double val) = 0;
//! @brief Gradient constancy importance
/** @see setGamma */
virtual double getGamma() const = 0;
/** @copybrief getGamma @see getGamma */
virtual void setGamma(double val) = 0;
//! @brief Pyramid scale factor
/** @see setScaleFactor */
virtual double getScaleFactor() const = 0;
/** @copybrief getScaleFactor @see getScaleFactor */
virtual void setScaleFactor(double val) = 0;
//! @brief Number of lagged non-linearity iterations (inner loop)
/** @see setInnerIterations */
virtual int getInnerIterations() const = 0;
/** @copybrief getInnerIterations @see getInnerIterations */
virtual void setInnerIterations(int val) = 0;
//! @brief Number of warping iterations (number of pyramid levels)
/** @see setOuterIterations */
virtual int getOuterIterations() const = 0;
/** @copybrief getOuterIterations @see getOuterIterations */
virtual void setOuterIterations(int val) = 0;
//! @brief Number of linear system solver iterations
/** @see setSolverIterations */
virtual int getSolverIterations() const = 0;
/** @copybrief getSolverIterations @see getSolverIterations */
virtual void setSolverIterations(int val) = 0;
};
CV_EXPORTS Ptr<BroxOpticalFlow> createOptFlow_Brox_CUDA();
class PyrLKOpticalFlow : public virtual DenseOpticalFlowExt
{
public:
/** @see setWindowSize */
virtual int getWindowSize() const = 0;
/** @copybrief getWindowSize @see getWindowSize */
virtual void setWindowSize(int val) = 0;
/** @see setMaxLevel */
virtual int getMaxLevel() const = 0;
/** @copybrief getMaxLevel @see getMaxLevel */
virtual void setMaxLevel(int val) = 0;
/** @see setIterations */
virtual int getIterations() const = 0;
/** @copybrief getIterations @see getIterations */
virtual void setIterations(int val) = 0;
};
CV_EXPORTS Ptr<PyrLKOpticalFlow> createOptFlow_PyrLK_CUDA();
//! @}
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_PyrLK_GPU();
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_PyrLK_OCL();
} }
} }
#endif // __OPENCV_SUPERRES_OPTICAL_FLOW_HPP__ #endif // OPENCV_SUPERRES_OPTICAL_FLOW_HPP

View File

@ -7,11 +7,12 @@
// copy or use the software. // copy or use the software.
// //
// //
// License Agreement // License Agreement
// For Open Source Computer Vision Library // For Open Source Computer Vision Library
// //
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// //
// Redistribution and use in source and binary forms, with or without modification, // Redistribution and use in source and binary forms, with or without modification,
@ -40,223 +41,277 @@
// //
//M*/ //M*/
#ifndef __OPENCV_BACKGROUND_SEGM_HPP__ #ifndef OPENCV_BACKGROUND_SEGM_HPP
#define __OPENCV_BACKGROUND_SEGM_HPP__ #define OPENCV_BACKGROUND_SEGM_HPP
#include "opencv2/core.hpp"
#include "opencv2/core/core.hpp"
#include <list>
namespace cv namespace cv
{ {
/*! //! @addtogroup video_motion
The Base Class for Background/Foreground Segmentation //! @{
The class is only used to define the common interface for /** @brief Base class for background/foreground segmentation. :
the whole family of background/foreground segmentation algorithms.
*/ The class is only used to define the common interface for the whole family of background/foreground
segmentation algorithms.
*/
class CV_EXPORTS_W BackgroundSubtractor : public Algorithm class CV_EXPORTS_W BackgroundSubtractor : public Algorithm
{ {
public: public:
//! the virtual destructor /** @brief Computes a foreground mask.
virtual ~BackgroundSubtractor();
//! the update operator that takes the next video frame and returns the current foreground mask as 8-bit binary image.
CV_WRAP_AS(apply) virtual void operator()(InputArray image, OutputArray fgmask,
double learningRate=0);
//! computes a background image @param image Next video frame.
virtual void getBackgroundImage(OutputArray backgroundImage) const; @param fgmask The output foreground mask as an 8-bit binary image.
@param learningRate The value between 0 and 1 that indicates how fast the background model is
learnt. Negative parameter value makes the algorithm to use some automatically chosen learning
rate. 0 means that the background model is not updated at all, 1 means that the background model
is completely reinitialized from the last frame.
*/
CV_WRAP virtual void apply(InputArray image, OutputArray fgmask, double learningRate=-1) = 0;
/** @brief Computes a background image.
@param backgroundImage The output background image.
@note Sometimes the background image can be very blurry, as it contain the average background
statistics.
*/
CV_WRAP virtual void getBackgroundImage(OutputArray backgroundImage) const = 0;
}; };
/*! /** @brief Gaussian Mixture-based Background/Foreground Segmentation Algorithm.
Gaussian Mixture-based Backbround/Foreground Segmentation Algorithm
The class implements the following algorithm: The class implements the Gaussian mixture model background subtraction described in @cite Zivkovic2004
"An improved adaptive background mixture model for real-time tracking with shadow detection" and @cite Zivkovic2006 .
P. KadewTraKuPong and R. Bowden, */
Proc. 2nd European Workshp on Advanced Video-Based Surveillance Systems, 2001."
http://personal.ee.surrey.ac.uk/Personal/R.Bowden/publications/avbs01/avbs01.pdf
*/
class CV_EXPORTS_W BackgroundSubtractorMOG : public BackgroundSubtractor
{
public:
//! the default constructor
CV_WRAP BackgroundSubtractorMOG();
//! the full constructor that takes the length of the history, the number of gaussian mixtures, the background ratio parameter and the noise strength
CV_WRAP BackgroundSubtractorMOG(int history, int nmixtures, double backgroundRatio, double noiseSigma=0);
//! the destructor
virtual ~BackgroundSubtractorMOG();
//! the update operator
virtual void operator()(InputArray image, OutputArray fgmask, double learningRate=0);
//! re-initiaization method
virtual void initialize(Size frameSize, int frameType);
virtual AlgorithmInfo* info() const;
protected:
Size frameSize;
int frameType;
Mat bgmodel;
int nframes;
int history;
int nmixtures;
double varThreshold;
double backgroundRatio;
double noiseSigma;
};
/*!
The class implements the following algorithm:
"Improved adaptive Gausian mixture model for background subtraction"
Z.Zivkovic
International Conference Pattern Recognition, UK, August, 2004.
http://www.zoranz.net/Publications/zivkovic2004ICPR.pdf
*/
class CV_EXPORTS_W BackgroundSubtractorMOG2 : public BackgroundSubtractor class CV_EXPORTS_W BackgroundSubtractorMOG2 : public BackgroundSubtractor
{ {
public: public:
//! the default constructor /** @brief Returns the number of last frames that affect the background model
CV_WRAP BackgroundSubtractorMOG2(); */
//! the full constructor that takes the length of the history, the number of gaussian mixtures, the background ratio parameter and the noise strength CV_WRAP virtual int getHistory() const = 0;
CV_WRAP BackgroundSubtractorMOG2(int history, float varThreshold, bool bShadowDetection=true); /** @brief Sets the number of last frames that affect the background model
//! the destructor */
virtual ~BackgroundSubtractorMOG2(); CV_WRAP virtual void setHistory(int history) = 0;
//! the update operator
virtual void operator()(InputArray image, OutputArray fgmask, double learningRate=-1);
//! computes a background image which are the mean of all background gaussians /** @brief Returns the number of gaussian components in the background model
virtual void getBackgroundImage(OutputArray backgroundImage) const; */
CV_WRAP virtual int getNMixtures() const = 0;
/** @brief Sets the number of gaussian components in the background model.
//! re-initiaization method The model needs to be reinitalized to reserve memory.
virtual void initialize(Size frameSize, int frameType); */
CV_WRAP virtual void setNMixtures(int nmixtures) = 0;//needs reinitialization!
virtual AlgorithmInfo* info() const; /** @brief Returns the "background ratio" parameter of the algorithm
protected: If a foreground pixel keeps semi-constant value for about backgroundRatio\*history frames, it's
Size frameSize; considered background and added to the model as a center of a new component. It corresponds to TB
int frameType; parameter in the paper.
Mat bgmodel; */
Mat bgmodelUsedModes;//keep track of number of modes per pixel CV_WRAP virtual double getBackgroundRatio() const = 0;
int nframes; /** @brief Sets the "background ratio" parameter of the algorithm
int history; */
int nmixtures; CV_WRAP virtual void setBackgroundRatio(double ratio) = 0;
//! here it is the maximum allowed number of mixture components.
//! Actual number is determined dynamically per pixel
double varThreshold;
// threshold on the squared Mahalanobis distance to decide if it is well described
// by the background model or not. Related to Cthr from the paper.
// This does not influence the update of the background. A typical value could be 4 sigma
// and that is varThreshold=4*4=16; Corresponds to Tb in the paper.
///////////////////////// /** @brief Returns the variance threshold for the pixel-model match
// less important parameters - things you might change but be carefull
////////////////////////
float backgroundRatio;
// corresponds to fTB=1-cf from the paper
// TB - threshold when the component becomes significant enough to be included into
// the background model. It is the TB=1-cf from the paper. So I use cf=0.1 => TB=0.
// For alpha=0.001 it means that the mode should exist for approximately 105 frames before
// it is considered foreground
// float noiseSigma;
float varThresholdGen;
//correspondts to Tg - threshold on the squared Mahalan. dist. to decide
//when a sample is close to the existing components. If it is not close
//to any a new component will be generated. I use 3 sigma => Tg=3*3=9.
//Smaller Tg leads to more generated components and higher Tg might make
//lead to small number of components but they can grow too large
float fVarInit;
float fVarMin;
float fVarMax;
//initial variance for the newly generated components.
//It will will influence the speed of adaptation. A good guess should be made.
//A simple way is to estimate the typical standard deviation from the images.
//I used here 10 as a reasonable value
// min and max can be used to further control the variance
float fCT;//CT - complexity reduction prior
//this is related to the number of samples needed to accept that a component
//actually exists. We use CT=0.05 of all the samples. By setting CT=0 you get
//the standard Stauffer&Grimson algorithm (maybe not exact but very similar)
//shadow detection parameters The main threshold on the squared Mahalanobis distance to decide if the sample is well described by
bool bShadowDetection;//default 1 - do shadow detection the background model or not. Related to Cthr from the paper.
unsigned char nShadowDetection;//do shadow detection - insert this value as the detection result - 127 default value */
float fTau; CV_WRAP virtual double getVarThreshold() const = 0;
// Tau - shadow threshold. The shadow is detected if the pixel is darker /** @brief Sets the variance threshold for the pixel-model match
//version of the background. Tau is a threshold on how much darker the shadow can be. */
//Tau= 0.5 means that if pixel is more than 2 times darker then it is not shadow CV_WRAP virtual void setVarThreshold(double varThreshold) = 0;
//See: Prati,Mikic,Trivedi,Cucchiarra,"Detecting Moving Shadows...",IEEE PAMI,2003.
/** @brief Returns the variance threshold for the pixel-model match used for new mixture component generation
Threshold for the squared Mahalanobis distance that helps decide when a sample is close to the
existing components (corresponds to Tg in the paper). If a pixel is not close to any component, it
is considered foreground or added as a new component. 3 sigma =\> Tg=3\*3=9 is default. A smaller Tg
value generates more components. A higher Tg value may result in a small number of components but
they can grow too large.
*/
CV_WRAP virtual double getVarThresholdGen() const = 0;
/** @brief Sets the variance threshold for the pixel-model match used for new mixture component generation
*/
CV_WRAP virtual void setVarThresholdGen(double varThresholdGen) = 0;
/** @brief Returns the initial variance of each gaussian component
*/
CV_WRAP virtual double getVarInit() const = 0;
/** @brief Sets the initial variance of each gaussian component
*/
CV_WRAP virtual void setVarInit(double varInit) = 0;
CV_WRAP virtual double getVarMin() const = 0;
CV_WRAP virtual void setVarMin(double varMin) = 0;
CV_WRAP virtual double getVarMax() const = 0;
CV_WRAP virtual void setVarMax(double varMax) = 0;
/** @brief Returns the complexity reduction threshold
This parameter defines the number of samples needed to accept to prove the component exists. CT=0.05
is a default value for all the samples. By setting CT=0 you get an algorithm very similar to the
standard Stauffer&Grimson algorithm.
*/
CV_WRAP virtual double getComplexityReductionThreshold() const = 0;
/** @brief Sets the complexity reduction threshold
*/
CV_WRAP virtual void setComplexityReductionThreshold(double ct) = 0;
/** @brief Returns the shadow detection flag
If true, the algorithm detects shadows and marks them. See createBackgroundSubtractorMOG2 for
details.
*/
CV_WRAP virtual bool getDetectShadows() const = 0;
/** @brief Enables or disables shadow detection
*/
CV_WRAP virtual void setDetectShadows(bool detectShadows) = 0;
/** @brief Returns the shadow value
Shadow value is the value used to mark shadows in the foreground mask. Default value is 127. Value 0
in the mask always means background, 255 means foreground.
*/
CV_WRAP virtual int getShadowValue() const = 0;
/** @brief Sets the shadow value
*/
CV_WRAP virtual void setShadowValue(int value) = 0;
/** @brief Returns the shadow threshold
A shadow is detected if pixel is a darker version of the background. The shadow threshold (Tau in
the paper) is a threshold defining how much darker the shadow can be. Tau= 0.5 means that if a pixel
is more than twice darker then it is not shadow. See Prati, Mikic, Trivedi and Cucchiara,
*Detecting Moving Shadows...*, IEEE PAMI,2003.
*/
CV_WRAP virtual double getShadowThreshold() const = 0;
/** @brief Sets the shadow threshold
*/
CV_WRAP virtual void setShadowThreshold(double threshold) = 0;
/** @brief Computes a foreground mask.
@param image Next video frame. Floating point frame will be used without scaling and should be in range \f$[0,255]\f$.
@param fgmask The output foreground mask as an 8-bit binary image.
@param learningRate The value between 0 and 1 that indicates how fast the background model is
learnt. Negative parameter value makes the algorithm to use some automatically chosen learning
rate. 0 means that the background model is not updated at all, 1 means that the background model
is completely reinitialized from the last frame.
*/
CV_WRAP virtual void apply(InputArray image, OutputArray fgmask, double learningRate=-1) CV_OVERRIDE = 0;
}; };
/** /** @brief Creates MOG2 Background Subtractor
* Background Subtractor module. Takes a series of images and returns a sequence of mask (8UC1)
* images of the same size, where 255 indicates Foreground and 0 represents Background. @param history Length of the history.
* This class implements an algorithm described in "Visual Tracking of Human Visitors under @param varThreshold Threshold on the squared Mahalanobis distance between the pixel and the model
* Variable-Lighting Conditions for a Responsive Audio Art Installation," A. Godbehere, to decide whether a pixel is well described by the background model. This parameter does not
* A. Matsukawa, K. Goldberg, American Control Conference, Montreal, June 2012. affect the background update.
@param detectShadows If true, the algorithm will detect shadows and mark them. It decreases the
speed a bit, so if you do not need this feature, set the parameter to false.
*/ */
class CV_EXPORTS BackgroundSubtractorGMG: public cv::BackgroundSubtractor CV_EXPORTS_W Ptr<BackgroundSubtractorMOG2>
createBackgroundSubtractorMOG2(int history=500, double varThreshold=16,
bool detectShadows=true);
/** @brief K-nearest neighbours - based Background/Foreground Segmentation Algorithm.
The class implements the K-nearest neighbours background subtraction described in @cite Zivkovic2006 .
Very efficient if number of foreground pixels is low.
*/
class CV_EXPORTS_W BackgroundSubtractorKNN : public BackgroundSubtractor
{ {
public: public:
BackgroundSubtractorGMG(); /** @brief Returns the number of last frames that affect the background model
virtual ~BackgroundSubtractorGMG(); */
virtual AlgorithmInfo* info() const; CV_WRAP virtual int getHistory() const = 0;
/** @brief Sets the number of last frames that affect the background model
*/
CV_WRAP virtual void setHistory(int history) = 0;
/** /** @brief Returns the number of data samples in the background model
* Validate parameters and set up data structures for appropriate image size. */
* Must call before running on data. CV_WRAP virtual int getNSamples() const = 0;
* @param frameSize input frame size /** @brief Sets the number of data samples in the background model.
* @param min minimum value taken on by pixels in image sequence. Usually 0
* @param max maximum value taken on by pixels in image sequence. e.g. 1.0 or 255 The model needs to be reinitalized to reserve memory.
*/
CV_WRAP virtual void setNSamples(int _nN) = 0;//needs reinitialization!
/** @brief Returns the threshold on the squared distance between the pixel and the sample
The threshold on the squared distance between the pixel and the sample to decide whether a pixel is
close to a data sample.
*/ */
void initialize(cv::Size frameSize, double min, double max); CV_WRAP virtual double getDist2Threshold() const = 0;
/** @brief Sets the threshold on the squared distance
*/
CV_WRAP virtual void setDist2Threshold(double _dist2Threshold) = 0;
/** /** @brief Returns the number of neighbours, the k in the kNN.
* Performs single-frame background subtraction and builds up a statistical background image
* model. K is the number of samples that need to be within dist2Threshold in order to decide that that
* @param image Input image pixel is matching the kNN background model.
* @param fgmask Output mask image representing foreground and background pixels
*/ */
virtual void operator()(InputArray image, OutputArray fgmask, double learningRate=-1.0); CV_WRAP virtual int getkNNSamples() const = 0;
/** @brief Sets the k in the kNN. How many nearest neighbours need to match.
*/
CV_WRAP virtual void setkNNSamples(int _nkNN) = 0;
/** /** @brief Returns the shadow detection flag
* Releases all inner buffers.
If true, the algorithm detects shadows and marks them. See createBackgroundSubtractorKNN for
details.
*/ */
void release(); CV_WRAP virtual bool getDetectShadows() const = 0;
/** @brief Enables or disables shadow detection
*/
CV_WRAP virtual void setDetectShadows(bool detectShadows) = 0;
//! Total number of distinct colors to maintain in histogram. /** @brief Returns the shadow value
int maxFeatures;
//! Set between 0.0 and 1.0, determines how quickly features are "forgotten" from histograms.
double learningRate;
//! Number of frames of video to use to initialize histograms.
int numInitializationFrames;
//! Number of discrete levels in each channel to be used in histograms.
int quantizationLevels;
//! Prior probability that any given pixel is a background pixel. A sensitivity parameter.
double backgroundPrior;
//! Value above which pixel is determined to be FG.
double decisionThreshold;
//! Smoothing radius, in pixels, for cleaning up FG image.
int smoothingRadius;
//! Perform background model update
bool updateBackgroundModel;
private: Shadow value is the value used to mark shadows in the foreground mask. Default value is 127. Value 0
double maxVal_; in the mask always means background, 255 means foreground.
double minVal_; */
CV_WRAP virtual int getShadowValue() const = 0;
/** @brief Sets the shadow value
*/
CV_WRAP virtual void setShadowValue(int value) = 0;
cv::Size frameSize_; /** @brief Returns the shadow threshold
int frameNum_;
cv::Mat_<int> nfeatures_; A shadow is detected if pixel is a darker version of the background. The shadow threshold (Tau in
cv::Mat_<unsigned int> colors_; the paper) is a threshold defining how much darker the shadow can be. Tau= 0.5 means that if a pixel
cv::Mat_<float> weights_; is more than twice darker then it is not shadow. See Prati, Mikic, Trivedi and Cucchiara,
*Detecting Moving Shadows...*, IEEE PAMI,2003.
cv::Mat buf_; */
CV_WRAP virtual double getShadowThreshold() const = 0;
/** @brief Sets the shadow threshold
*/
CV_WRAP virtual void setShadowThreshold(double threshold) = 0;
}; };
} /** @brief Creates KNN Background Subtractor
@param history Length of the history.
@param dist2Threshold Threshold on the squared distance between the pixel and the sample to decide
whether a pixel is close to that sample. This parameter does not affect the background update.
@param detectShadows If true, the algorithm will detect shadows and mark them. It decreases the
speed a bit, so if you do not need this feature, set the parameter to false.
*/
CV_EXPORTS_W Ptr<BackgroundSubtractorKNN>
createBackgroundSubtractorKNN(int history=500, double dist2Threshold=400.0,
bool detectShadows=true);
//! @} video_motion
} // cv
#endif #endif

View File

@ -1,7 +1,3 @@
/*! \file tracking.hpp
\brief The Object and Feature Tracking
*/
/*M/////////////////////////////////////////////////////////////////////////////////////// /*M///////////////////////////////////////////////////////////////////////////////////////
// //
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
@ -11,11 +7,12 @@
// copy or use the software. // copy or use the software.
// //
// //
// License Agreement // License Agreement
// For Open Source Computer Vision Library // For Open Source Computer Vision Library
// //
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// //
// Redistribution and use in source and binary forms, with or without modification, // Redistribution and use in source and binary forms, with or without modification,
@ -44,249 +41,365 @@
// //
//M*/ //M*/
#ifndef __OPENCV_TRACKING_HPP__ #ifndef OPENCV_TRACKING_HPP
#define __OPENCV_TRACKING_HPP__ #define OPENCV_TRACKING_HPP
#include "opencv2/core/core.hpp" #include "opencv2/core.hpp"
#include "opencv2/imgproc/imgproc.hpp" #include "opencv2/imgproc.hpp"
#ifdef __cplusplus
extern "C" {
#endif
/****************************************************************************************\
* Motion Analysis *
\****************************************************************************************/
/************************************ optical flow ***************************************/
#define CV_LKFLOW_PYR_A_READY 1
#define CV_LKFLOW_PYR_B_READY 2
#define CV_LKFLOW_INITIAL_GUESSES 4
#define CV_LKFLOW_GET_MIN_EIGENVALS 8
/* It is Lucas & Kanade method, modified to use pyramids.
Also it does several iterations to get optical flow for
every point at every pyramid level.
Calculates optical flow between two images for certain set of points (i.e.
it is a "sparse" optical flow, which is opposite to the previous 3 methods) */
CVAPI(void) cvCalcOpticalFlowPyrLK( const CvArr* prev, const CvArr* curr,
CvArr* prev_pyr, CvArr* curr_pyr,
const CvPoint2D32f* prev_features,
CvPoint2D32f* curr_features,
int count,
CvSize win_size,
int level,
char* status,
float* track_error,
CvTermCriteria criteria,
int flags );
/* Modification of a previous sparse optical flow algorithm to calculate
affine flow */
CVAPI(void) cvCalcAffineFlowPyrLK( const CvArr* prev, const CvArr* curr,
CvArr* prev_pyr, CvArr* curr_pyr,
const CvPoint2D32f* prev_features,
CvPoint2D32f* curr_features,
float* matrices, int count,
CvSize win_size, int level,
char* status, float* track_error,
CvTermCriteria criteria, int flags );
/* Estimate rigid transformation between 2 images or 2 point sets */
CVAPI(int) cvEstimateRigidTransform( const CvArr* A, const CvArr* B,
CvMat* M, int full_affine );
/* Estimate optical flow for each pixel using the two-frame G. Farneback algorithm */
CVAPI(void) cvCalcOpticalFlowFarneback( const CvArr* prev, const CvArr* next,
CvArr* flow, double pyr_scale, int levels,
int winsize, int iterations, int poly_n,
double poly_sigma, int flags );
/********************************* motion templates *************************************/
/****************************************************************************************\
* All the motion template functions work only with single channel images. *
* Silhouette image must have depth IPL_DEPTH_8U or IPL_DEPTH_8S *
* Motion history image must have depth IPL_DEPTH_32F, *
* Gradient mask - IPL_DEPTH_8U or IPL_DEPTH_8S, *
* Motion orientation image - IPL_DEPTH_32F *
* Segmentation mask - IPL_DEPTH_32F *
* All the angles are in degrees, all the times are in milliseconds *
\****************************************************************************************/
/* Updates motion history image given motion silhouette */
CVAPI(void) cvUpdateMotionHistory( const CvArr* silhouette, CvArr* mhi,
double timestamp, double duration );
/* Calculates gradient of the motion history image and fills
a mask indicating where the gradient is valid */
CVAPI(void) cvCalcMotionGradient( const CvArr* mhi, CvArr* mask, CvArr* orientation,
double delta1, double delta2,
int aperture_size CV_DEFAULT(3));
/* Calculates average motion direction within a selected motion region
(region can be selected by setting ROIs and/or by composing a valid gradient mask
with the region mask) */
CVAPI(double) cvCalcGlobalOrientation( const CvArr* orientation, const CvArr* mask,
const CvArr* mhi, double timestamp,
double duration );
/* Splits a motion history image into a few parts corresponding to separate independent motions
(e.g. left hand, right hand) */
CVAPI(CvSeq*) cvSegmentMotion( const CvArr* mhi, CvArr* seg_mask,
CvMemStorage* storage,
double timestamp, double seg_thresh );
/****************************************************************************************\
* Tracking *
\****************************************************************************************/
/* Implements CAMSHIFT algorithm - determines object position, size and orientation
from the object histogram back project (extension of meanshift) */
CVAPI(int) cvCamShift( const CvArr* prob_image, CvRect window,
CvTermCriteria criteria, CvConnectedComp* comp,
CvBox2D* box CV_DEFAULT(NULL) );
/* Implements MeanShift algorithm - determines object position
from the object histogram back project */
CVAPI(int) cvMeanShift( const CvArr* prob_image, CvRect window,
CvTermCriteria criteria, CvConnectedComp* comp );
/*
   Legacy C state of the standard Kalman filter (in G. Welch's and
   G. Bishop's notation):

     x(k) = A*x(k-1) + B*u(k) + w(k)   p(w) ~ N(0,Q)
     z(k) = H*x(k)   + v(k),           p(v) ~ N(0,R)

   All matrices are owned by the structure; create/destroy with
   cvCreateKalman / cvReleaseKalman below.
*/
typedef struct CvKalman
{
int MP; /* number of measurement vector dimensions */
int DP; /* number of state vector dimensions */
int CP; /* number of control vector dimensions */
/* backward compatibility fields: raw float aliases of the CvMat data
   below, kept only for old client code */
#if 1
float* PosterState; /* =state_pre->data.fl */
float* PriorState; /* =state_post->data.fl */
float* DynamMatr; /* =transition_matrix->data.fl */
float* MeasurementMatr; /* =measurement_matrix->data.fl */
float* MNCovariance; /* =measurement_noise_cov->data.fl */
float* PNCovariance; /* =process_noise_cov->data.fl */
float* KalmGainMatr; /* =gain->data.fl */
float* PriorErrorCovariance;/* =error_cov_pre->data.fl */
float* PosterErrorCovariance;/* =error_cov_post->data.fl */
float* Temp1; /* temp1->data.fl */
float* Temp2; /* temp2->data.fl */
#endif
CvMat* state_pre; /* predicted state (x'(k)):
x(k)=A*x(k-1)+B*u(k) */
CvMat* state_post; /* corrected state (x(k)):
x(k)=x'(k)+K(k)*(z(k)-H*x'(k)) */
CvMat* transition_matrix; /* state transition matrix (A) */
CvMat* control_matrix; /* control matrix (B)
(it is not used if there is no control)*/
CvMat* measurement_matrix; /* measurement matrix (H) */
CvMat* process_noise_cov; /* process noise covariance matrix (Q) */
CvMat* measurement_noise_cov; /* measurement noise covariance matrix (R) */
CvMat* error_cov_pre; /* priori error estimate covariance matrix (P'(k)):
P'(k)=A*P(k-1)*At + Q)*/
CvMat* gain; /* Kalman gain matrix (K(k)):
K(k)=P'(k)*Ht*inv(H*P'(k)*Ht+R)*/
CvMat* error_cov_post; /* posteriori error estimate covariance matrix (P(k)):
P(k)=(I-K(k)*H)*P'(k) */
CvMat* temp1; /* temporary matrices used internally by predict/correct */
CvMat* temp2;
CvMat* temp3;
CvMat* temp4;
CvMat* temp5;
} CvKalman;
/* Creates a Kalman filter with `dynam_params` state dimensions,
   `measure_params` measurement dimensions and optional `control_params`
   control dimensions; A, B, Q, R and the state are set to some initial
   default values. */
CVAPI(CvKalman*) cvCreateKalman( int dynam_params, int measure_params,
int control_params CV_DEFAULT(0));

/* Releases the Kalman filter state created by cvCreateKalman. */
CVAPI(void) cvReleaseKalman( CvKalman** kalman);

/* Updates the Kalman filter by time (predicts the future state of the
   system from the optional `control` input) and returns the predicted
   state. */
CVAPI(const CvMat*) cvKalmanPredict( CvKalman* kalman,
const CvMat* control CV_DEFAULT(NULL));

/* Updates the Kalman filter by `measurement` (corrects the state of the
   system and the internal matrices) and returns the corrected state. */
CVAPI(const CvMat*) cvKalmanCorrect( CvKalman* kalman, const CvMat* measurement );

/* Legacy aliases for the two update functions above. */
#define cvKalmanUpdateByTime cvKalmanPredict
#define cvKalmanUpdateByMeasurement cvKalmanCorrect
#ifdef __cplusplus
}
namespace cv namespace cv
{ {
//! updates motion history image using the current silhouette //! @addtogroup video_track
CV_EXPORTS_W void updateMotionHistory( InputArray silhouette, InputOutputArray mhi, //! @{
double timestamp, double duration );
//! computes the motion gradient orientation image from the motion history image enum { OPTFLOW_USE_INITIAL_FLOW = 4,
CV_EXPORTS_W void calcMotionGradient( InputArray mhi, OutputArray mask, OPTFLOW_LK_GET_MIN_EIGENVALS = 8,
OutputArray orientation, OPTFLOW_FARNEBACK_GAUSSIAN = 256
double delta1, double delta2, };
int apertureSize=3 );
//! computes the global orientation of the selected motion history image part /** @brief Finds an object center, size, and orientation.
CV_EXPORTS_W double calcGlobalOrientation( InputArray orientation, InputArray mask,
InputArray mhi, double timestamp,
double duration );
CV_EXPORTS_W void segmentMotion(InputArray mhi, OutputArray segmask, @param probImage Back projection of the object histogram. See calcBackProject.
CV_OUT vector<Rect>& boundingRects, @param window Initial search window.
double timestamp, double segThresh); @param criteria Stop criteria for the underlying meanShift.
returns
(in old interfaces) Number of iterations CAMSHIFT took to converge
The function implements the CAMSHIFT object tracking algorithm @cite Bradski98 . First, it finds an
object center using meanShift and then adjusts the window size and finds the optimal rotation. The
function returns the rotated rectangle structure that includes the object position, size, and
orientation. The next position of the search window can be obtained with RotatedRect::boundingRect()
//! updates the object tracking window using CAMSHIFT algorithm See the OpenCV sample camshiftdemo.c that tracks colored objects.
CV_EXPORTS_W RotatedRect CamShift( InputArray probImage, CV_OUT CV_IN_OUT Rect& window,
@note
- (Python) A sample explaining the camshift tracking algorithm can be found at
opencv_source_code/samples/python/camshift.py
*/
CV_EXPORTS_W RotatedRect CamShift( InputArray probImage, CV_IN_OUT Rect& window,
TermCriteria criteria ); TermCriteria criteria );
/** @example samples/cpp/camshiftdemo.cpp
//! updates the object tracking window using meanshift algorithm An example using the mean-shift tracking algorithm
CV_EXPORTS_W int meanShift( InputArray probImage, CV_OUT CV_IN_OUT Rect& window,
TermCriteria criteria );
/*!
Kalman filter.
The class implements standard Kalman filter \url{http://en.wikipedia.org/wiki/Kalman_filter}.
However, you can modify KalmanFilter::transitionMatrix, KalmanFilter::controlMatrix and
KalmanFilter::measurementMatrix to get the extended Kalman filter functionality.
*/ */
/** @brief Finds an object on a back projection image.
@param probImage Back projection of the object histogram. See calcBackProject for details.
@param window Initial search window.
@param criteria Stop criteria for the iterative search algorithm.
returns
: Number of iterations CAMSHIFT took to converge.
The function implements the iterative object search algorithm. It takes the input back projection of
an object and the initial position. The mass center in window of the back projection image is
computed and the search window center shifts to the mass center. The procedure is repeated until the
specified number of iterations criteria.maxCount is done or until the window center shifts by less
than criteria.epsilon. The algorithm is used inside CamShift and, unlike CamShift , the search
window size or orientation do not change during the search. You can simply pass the output of
calcBackProject to this function. But better results can be obtained if you pre-filter the back
projection and remove the noise. For example, you can do this by retrieving connected components
with findContours , throwing away contours with small area ( contourArea ), and rendering the
remaining contours with drawContours.
*/
CV_EXPORTS_W int meanShift( InputArray probImage, CV_IN_OUT Rect& window, TermCriteria criteria );
/** @brief Constructs the image pyramid which can be passed to calcOpticalFlowPyrLK.
@param img 8-bit input image.
@param pyramid output pyramid.
@param winSize window size of optical flow algorithm. Must be not less than winSize argument of
calcOpticalFlowPyrLK. It is needed to calculate required padding for pyramid levels.
@param maxLevel 0-based maximal pyramid level number.
@param withDerivatives set to precompute gradients for the every pyramid level. If pyramid is
constructed without the gradients then calcOpticalFlowPyrLK will calculate them internally.
@param pyrBorder the border mode for pyramid layers.
@param derivBorder the border mode for gradients.
@param tryReuseInputImage put ROI of input image into the pyramid if possible. You can pass false
to force data copying.
@return number of levels in constructed pyramid. Can be less than maxLevel.
*/
CV_EXPORTS_W int buildOpticalFlowPyramid( InputArray img, OutputArrayOfArrays pyramid,
Size winSize, int maxLevel, bool withDerivatives = true,
int pyrBorder = BORDER_REFLECT_101,
int derivBorder = BORDER_CONSTANT,
bool tryReuseInputImage = true );
/** @example samples/cpp/lkdemo.cpp
An example using the Lucas-Kanade optical flow algorithm
*/
/** @brief Calculates an optical flow for a sparse feature set using the iterative Lucas-Kanade method with
pyramids.
@param prevImg first 8-bit input image or pyramid constructed by buildOpticalFlowPyramid.
@param nextImg second input image or pyramid of the same size and the same type as prevImg.
@param prevPts vector of 2D points for which the flow needs to be found; point coordinates must be
single-precision floating-point numbers.
@param nextPts output vector of 2D points (with single-precision floating-point coordinates)
containing the calculated new positions of input features in the second image; when
OPTFLOW_USE_INITIAL_FLOW flag is passed, the vector must have the same size as in the input.
@param status output status vector (of unsigned chars); each element of the vector is set to 1 if
the flow for the corresponding features has been found, otherwise, it is set to 0.
@param err output vector of errors; each element of the vector is set to an error for the
corresponding feature, type of the error measure can be set in flags parameter; if the flow wasn't
found then the error is not defined (use the status parameter to find such cases).
@param winSize size of the search window at each pyramid level.
@param maxLevel 0-based maximal pyramid level number; if set to 0, pyramids are not used (single
level), if set to 1, two levels are used, and so on; if pyramids are passed to input then
algorithm will use as many levels as pyramids have but no more than maxLevel.
@param criteria parameter, specifying the termination criteria of the iterative search algorithm
(after the specified maximum number of iterations criteria.maxCount or when the search window
moves by less than criteria.epsilon.
@param flags operation flags:
- **OPTFLOW_USE_INITIAL_FLOW** uses initial estimations, stored in nextPts; if the flag is
not set, then prevPts is copied to nextPts and is considered the initial estimate.
- **OPTFLOW_LK_GET_MIN_EIGENVALS** use minimum eigen values as an error measure (see
minEigThreshold description); if the flag is not set, then L1 distance between patches
around the original and a moved point, divided by number of pixels in a window, is used as a
error measure.
@param minEigThreshold the algorithm calculates the minimum eigen value of a 2x2 normal matrix of
optical flow equations (this matrix is called a spatial gradient matrix in @cite Bouguet00), divided
by number of pixels in a window; if this value is less than minEigThreshold, then a corresponding
feature is filtered out and its flow is not processed, so it allows to remove bad points and get a
performance boost.
The function implements a sparse iterative version of the Lucas-Kanade optical flow in pyramids. See
@cite Bouguet00 . The function is parallelized with the TBB library.
@note
- An example using the Lucas-Kanade optical flow algorithm can be found at
opencv_source_code/samples/cpp/lkdemo.cpp
- (Python) An example using the Lucas-Kanade optical flow algorithm can be found at
opencv_source_code/samples/python/lk_track.py
- (Python) An example using the Lucas-Kanade tracker for homography matching can be found at
opencv_source_code/samples/python/lk_homography.py
*/
CV_EXPORTS_W void calcOpticalFlowPyrLK( InputArray prevImg, InputArray nextImg,
InputArray prevPts, InputOutputArray nextPts,
OutputArray status, OutputArray err,
Size winSize = Size(21,21), int maxLevel = 3,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 0.01),
int flags = 0, double minEigThreshold = 1e-4 );
/** @brief Computes a dense optical flow using the Gunnar Farneback's algorithm.
@param prev first 8-bit single-channel input image.
@param next second input image of the same size and the same type as prev.
@param flow computed flow image that has the same size as prev and type CV_32FC2.
@param pyr_scale parameter, specifying the image scale (\<1) to build pyramids for each image;
pyr_scale=0.5 means a classical pyramid, where each next layer is twice smaller than the previous
one.
@param levels number of pyramid layers including the initial image; levels=1 means that no extra
layers are created and only the original images are used.
@param winsize averaging window size; larger values increase the algorithm robustness to image
noise and give more chances for fast motion detection, but yield more blurred motion field.
@param iterations number of iterations the algorithm does at each pyramid level.
@param poly_n size of the pixel neighborhood used to find polynomial expansion in each pixel;
larger values mean that the image will be approximated with smoother surfaces, yielding more
robust algorithm and more blurred motion field, typically poly_n =5 or 7.
@param poly_sigma standard deviation of the Gaussian that is used to smooth derivatives used as a
basis for the polynomial expansion; for poly_n=5, you can set poly_sigma=1.1, for poly_n=7, a
good value would be poly_sigma=1.5.
@param flags operation flags that can be a combination of the following:
- **OPTFLOW_USE_INITIAL_FLOW** uses the input flow as an initial flow approximation.
- **OPTFLOW_FARNEBACK_GAUSSIAN** uses the Gaussian \f$\texttt{winsize}\times\texttt{winsize}\f$
filter instead of a box filter of the same size for optical flow estimation; usually, this
option gives a more accurate flow than with a box filter, at the cost of lower speed;
normally, winsize for a Gaussian window should be set to a larger value to achieve the same
level of robustness.
The function finds an optical flow for each prev pixel using the @cite Farneback2003 algorithm so that
\f[\texttt{prev} (y,x) \sim \texttt{next} ( y + \texttt{flow} (y,x)[1], x + \texttt{flow} (y,x)[0])\f]
@note
- An example using the optical flow algorithm described by Gunnar Farneback can be found at
opencv_source_code/samples/cpp/fback.cpp
- (Python) An example using the optical flow algorithm described by Gunnar Farneback can be
found at opencv_source_code/samples/python/opt_flow.py
*/
CV_EXPORTS_W void calcOpticalFlowFarneback( InputArray prev, InputArray next, InputOutputArray flow,
double pyr_scale, int levels, int winsize,
int iterations, int poly_n, double poly_sigma,
int flags );
/** @brief Computes an optimal affine transformation between two 2D point sets.
@param src First input 2D point set stored in std::vector or Mat, or an image stored in Mat.
@param dst Second input 2D point set of the same size and the same type as A, or another image.
@param fullAffine If true, the function finds an optimal affine transformation with no additional
restrictions (6 degrees of freedom). Otherwise, the class of transformations to choose from is
limited to combinations of translation, rotation, and uniform scaling (4 degrees of freedom).
The function finds an optimal affine transform *[A|b]* (a 2 x 3 floating-point matrix) that
approximates best the affine transformation between:
* Two point sets
* Two raster images. In this case, the function first finds some features in the src image and
finds the corresponding features in dst image. After that, the problem is reduced to the first
case.
In case of point sets, the problem is formulated as follows: you need to find a 2x2 matrix *A* and
2x1 vector *b* so that:
\f[[A^*|b^*] = arg \min _{[A|b]} \sum _i \| \texttt{dst}[i] - A { \texttt{src}[i]}^T - b \| ^2\f]
where src[i] and dst[i] are the i-th points in src and dst, respectively
\f$[A|b]\f$ can be either arbitrary (when fullAffine=true ) or have a form of
\f[\begin{bmatrix} a_{11} & a_{12} & b_1 \\ -a_{12} & a_{11} & b_2 \end{bmatrix}\f]
when fullAffine=false.
@sa
estimateAffine2D, estimateAffinePartial2D, getAffineTransform, getPerspectiveTransform, findHomography
*/
/* Computes an optimal 2D affine transform [A|b] (2 x 3 matrix) between two
   point sets or two images (see the documentation block above).
   fullAffine=true estimates all 6 degrees of freedom; otherwise the
   transform is limited to translation + rotation + uniform scaling. */
CV_EXPORTS_W Mat estimateRigidTransform( InputArray src, InputArray dst, bool fullAffine);

/* Overload exposing the RANSAC parameters (max iterations, good-sample
   ratio and initial subset size) used internally. */
CV_EXPORTS_W Mat estimateRigidTransform( InputArray src, InputArray dst, bool fullAffine, int ransacMaxIters, double ransacGoodRatio,
int ransacSize0);

/* Motion models accepted by findTransformECC (see its documentation below
   for the warp-matrix shape implied by each model). */
enum
{
MOTION_TRANSLATION = 0,
MOTION_EUCLIDEAN = 1,
MOTION_AFFINE = 2,
MOTION_HOMOGRAPHY = 3
};
/** @brief Computes the Enhanced Correlation Coefficient value between two images @cite EP08 .
@param templateImage single-channel template image; CV_8U or CV_32F array.
@param inputImage single-channel input image to be warped to provide an image similar to
templateImage, same type as templateImage.
@param inputMask An optional mask to indicate valid values of inputImage.
@sa
findTransformECC
*/
CV_EXPORTS_W double computeECC(InputArray templateImage, InputArray inputImage, InputArray inputMask = noArray());
/** @example samples/cpp/image_alignment.cpp
An example using the image alignment ECC algorithm
*/
/** @brief Finds the geometric transform (warp) between two images in terms of the ECC criterion @cite EP08 .
@param templateImage single-channel template image; CV_8U or CV_32F array.
@param inputImage single-channel input image which should be warped with the final warpMatrix in
order to provide an image similar to templateImage, same type as templateImage.
@param warpMatrix floating-point \f$2\times 3\f$ or \f$3\times 3\f$ mapping matrix (warp).
@param motionType parameter, specifying the type of motion:
- **MOTION_TRANSLATION** sets a translational motion model; warpMatrix is \f$2\times 3\f$ with
the first \f$2\times 2\f$ part being the unity matrix and the rest two parameters being
estimated.
- **MOTION_EUCLIDEAN** sets a Euclidean (rigid) transformation as motion model; three
parameters are estimated; warpMatrix is \f$2\times 3\f$.
- **MOTION_AFFINE** sets an affine motion model (DEFAULT); six parameters are estimated;
warpMatrix is \f$2\times 3\f$.
- **MOTION_HOMOGRAPHY** sets a homography as a motion model; eight parameters are
estimated;\`warpMatrix\` is \f$3\times 3\f$.
@param criteria parameter, specifying the termination criteria of the ECC algorithm;
criteria.epsilon defines the threshold of the increment in the correlation coefficient between two
iterations (a negative criteria.epsilon makes criteria.maxcount the only termination criterion).
Default values are shown in the declaration above.
@param inputMask An optional mask to indicate valid values of inputImage.
@param gaussFiltSize An optional value indicating size of gaussian blur filter; (DEFAULT: 5)
The function estimates the optimum transformation (warpMatrix) with respect to ECC criterion
(@cite EP08), that is
\f[\texttt{warpMatrix} = \texttt{warpMatrix} = \arg\max_{W} \texttt{ECC}(\texttt{templateImage}(x,y),\texttt{inputImage}(x',y'))\f]
where
\f[\begin{bmatrix} x' \\ y' \end{bmatrix} = W \cdot \begin{bmatrix} x \\ y \\ 1 \end{bmatrix}\f]
(the equation holds with homogeneous coordinates for homography). It returns the final enhanced
correlation coefficient, that is the correlation coefficient between the template image and the
final warped input image. When a \f$3\times 3\f$ matrix is given with motionType =0, 1 or 2, the third
row is ignored.
Unlike findHomography and estimateRigidTransform, the function findTransformECC implements an
area-based alignment that builds on intensity similarities. In essence, the function updates the
initial transformation that roughly aligns the images. If this information is missing, the identity
warp (unity matrix) is used as an initialization. Note that if images undergo strong
displacements/rotations, an initial transformation that roughly aligns the images is necessary
(e.g., a simple euclidean/similarity transform that allows for the images showing the same image
content approximately). Use inverse warping in the second image to take an image close to the first
one, i.e. use the flag WARP_INVERSE_MAP with warpAffine or warpPerspective. See also the OpenCV
sample image_alignment.cpp that demonstrates the use of the function. Note that the function throws
an exception if algorithm does not converges.
@sa
computeECC, estimateAffine2D, estimateAffinePartial2D, findHomography
*/
/* Finds the geometric transform (warp) between two images in terms of the
   ECC criterion (see the documentation block above). `warpMatrix` serves
   both as the initial guess and as the output; the return value is the
   final enhanced correlation coefficient between the template image and
   the warped input image. */
CV_EXPORTS_W double findTransformECC( InputArray templateImage, InputArray inputImage,
InputOutputArray warpMatrix, int motionType,
TermCriteria criteria,
InputArray inputMask, int gaussFiltSize);

/** @overload
Backward-compatible variant without the `gaussFiltSize` parameter (the
Gaussian blur filter size defaults to 5, per the parameter documentation
above). */
CV_EXPORTS
double findTransformECC(InputArray templateImage, InputArray inputImage,
InputOutputArray warpMatrix, int motionType = MOTION_AFFINE,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 50, 0.001),
InputArray inputMask = noArray());
/** @example samples/cpp/kalman.cpp
An example using the standard Kalman filter
*/
/** @brief Kalman filter class.
The class implements a standard Kalman filter <http://en.wikipedia.org/wiki/Kalman_filter>,
@cite Welch95 . However, you can modify transitionMatrix, controlMatrix, and measurementMatrix to get
an extended Kalman filter functionality.
@note In C API when CvKalman\* kalmanFilter structure is not needed anymore, it should be released
with cvReleaseKalman(&kalmanFilter)
*/
class CV_EXPORTS_W KalmanFilter class CV_EXPORTS_W KalmanFilter
{ {
public: public:
//! the default constructor
CV_WRAP KalmanFilter(); CV_WRAP KalmanFilter();
//! the full constructor taking the dimensionality of the state, of the measurement and of the control vector /** @overload
CV_WRAP KalmanFilter(int dynamParams, int measureParams, int controlParams=0, int type=CV_32F); @param dynamParams Dimensionality of the state.
//! re-initializes Kalman filter. The previous content is destroyed. @param measureParams Dimensionality of the measurement.
void init(int dynamParams, int measureParams, int controlParams=0, int type=CV_32F); @param controlParams Dimensionality of the control vector.
@param type Type of the created matrices that should be CV_32F or CV_64F.
*/
CV_WRAP KalmanFilter( int dynamParams, int measureParams, int controlParams = 0, int type = CV_32F );
//! computes predicted state /** @brief Re-initializes Kalman filter. The previous content is destroyed.
CV_WRAP const Mat& predict(const Mat& control=Mat());
//! updates the predicted state from the measurement
CV_WRAP const Mat& correct(const Mat& measurement);
Mat statePre; //!< predicted state (x'(k)): x(k)=A*x(k-1)+B*u(k) @param dynamParams Dimensionality of the state.
Mat statePost; //!< corrected state (x(k)): x(k)=x'(k)+K(k)*(z(k)-H*x'(k)) @param measureParams Dimensionality of the measurement.
Mat transitionMatrix; //!< state transition matrix (A) @param controlParams Dimensionality of the control vector.
Mat controlMatrix; //!< control matrix (B) (not used if there is no control) @param type Type of the created matrices that should be CV_32F or CV_64F.
Mat measurementMatrix; //!< measurement matrix (H) */
Mat processNoiseCov; //!< process noise covariance matrix (Q) void init( int dynamParams, int measureParams, int controlParams = 0, int type = CV_32F );
Mat measurementNoiseCov;//!< measurement noise covariance matrix (R)
Mat errorCovPre; //!< priori error estimate covariance matrix (P'(k)): P'(k)=A*P(k-1)*At + Q)*/ /** @brief Computes a predicted state.
Mat gain; //!< Kalman gain matrix (K(k)): K(k)=P'(k)*Ht*inv(H*P'(k)*Ht+R)
Mat errorCovPost; //!< posteriori error estimate covariance matrix (P(k)): P(k)=(I-K(k)*H)*P'(k) @param control The optional input control
*/
CV_WRAP const Mat& predict( const Mat& control = Mat() );
/** @brief Updates the predicted state from the measurement.
@param measurement The measured system parameters
*/
CV_WRAP const Mat& correct( const Mat& measurement );
CV_PROP_RW Mat statePre; //!< predicted state (x'(k)): x(k)=A*x(k-1)+B*u(k)
CV_PROP_RW Mat statePost; //!< corrected state (x(k)): x(k)=x'(k)+K(k)*(z(k)-H*x'(k))
CV_PROP_RW Mat transitionMatrix; //!< state transition matrix (A)
CV_PROP_RW Mat controlMatrix; //!< control matrix (B) (not used if there is no control)
CV_PROP_RW Mat measurementMatrix; //!< measurement matrix (H)
CV_PROP_RW Mat processNoiseCov; //!< process noise covariance matrix (Q)
CV_PROP_RW Mat measurementNoiseCov;//!< measurement noise covariance matrix (R)
CV_PROP_RW Mat errorCovPre; //!< priori error estimate covariance matrix (P'(k)): P'(k)=A*P(k-1)*At + Q)*/
CV_PROP_RW Mat gain; //!< Kalman gain matrix (K(k)): K(k)=P'(k)*Ht*inv(H*P'(k)*Ht+R)
CV_PROP_RW Mat errorCovPost; //!< posteriori error estimate covariance matrix (P(k)): P(k)=(I-K(k)*H)*P'(k)
// temporary matrices // temporary matrices
Mat temp1; Mat temp1;
@ -296,78 +409,246 @@ public:
Mat temp5; Mat temp5;
}; };
enum
{
OPTFLOW_USE_INITIAL_FLOW = CV_LKFLOW_INITIAL_GUESSES,
OPTFLOW_LK_GET_MIN_EIGENVALS = CV_LKFLOW_GET_MIN_EIGENVALS,
OPTFLOW_FARNEBACK_GAUSSIAN = 256
};
//! constructs a pyramid which can be used as input for calcOpticalFlowPyrLK class CV_EXPORTS_W DenseOpticalFlow : public Algorithm
CV_EXPORTS_W int buildOpticalFlowPyramid(InputArray img, OutputArrayOfArrays pyramid,
Size winSize, int maxLevel, bool withDerivatives = true,
int pyrBorder = BORDER_REFLECT_101, int derivBorder = BORDER_CONSTANT,
bool tryReuseInputImage = true);
//! computes sparse optical flow using multi-scale Lucas-Kanade algorithm
CV_EXPORTS_W void calcOpticalFlowPyrLK( InputArray prevImg, InputArray nextImg,
InputArray prevPts, CV_OUT InputOutputArray nextPts,
OutputArray status, OutputArray err,
Size winSize=Size(21,21), int maxLevel=3,
TermCriteria criteria=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 0.01),
int flags=0, double minEigThreshold=1e-4);
//! computes dense optical flow using Farneback algorithm
CV_EXPORTS_W void calcOpticalFlowFarneback( InputArray prev, InputArray next,
CV_OUT InputOutputArray flow, double pyr_scale, int levels, int winsize,
int iterations, int poly_n, double poly_sigma, int flags );
//! estimates the best-fit Euqcidean, similarity, affine or perspective transformation
// that maps one 2D point set to another or one image to another.
CV_EXPORTS_W Mat estimateRigidTransform( InputArray src, InputArray dst,
bool fullAffine);
//! computes dense optical flow using Simple Flow algorithm
CV_EXPORTS_W void calcOpticalFlowSF(Mat& from,
Mat& to,
Mat& flow,
int layers,
int averaging_block_size,
int max_flow);
CV_EXPORTS_W void calcOpticalFlowSF(Mat& from,
Mat& to,
Mat& flow,
int layers,
int averaging_block_size,
int max_flow,
double sigma_dist,
double sigma_color,
int postprocess_window,
double sigma_dist_fix,
double sigma_color_fix,
double occ_thr,
int upscale_averaging_radius,
double upscale_sigma_dist,
double upscale_sigma_color,
double speed_up_thr);
class CV_EXPORTS DenseOpticalFlow : public Algorithm
{ {
public: public:
virtual void calc(InputArray I0, InputArray I1, InputOutputArray flow) = 0; /** @brief Calculates an optical flow.
virtual void collectGarbage() = 0;
@param I0 first 8-bit single-channel input image.
@param I1 second input image of the same size and the same type as prev.
@param flow computed flow image that has the same size as prev and type CV_32FC2.
*/
CV_WRAP virtual void calc( InputArray I0, InputArray I1, InputOutputArray flow ) = 0;
/** @brief Releases all inner buffers.
*/
CV_WRAP virtual void collectGarbage() = 0;
}; };
// Implementation of the Zach, Pock and Bischof Dual TV-L1 Optical Flow method /** @brief Base interface for sparse optical flow algorithms.
// */
// see reference: class CV_EXPORTS_W SparseOpticalFlow : public Algorithm
// [1] C. Zach, T. Pock and H. Bischof, "A Duality Based Approach for Realtime TV-L1 Optical Flow". {
// [2] Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flow Estimation". public:
CV_EXPORTS Ptr<DenseOpticalFlow> createOptFlow_DualTVL1(); /** @brief Calculates a sparse optical flow.
} @param prevImg First input image.
@param nextImg Second input image of the same size and the same type as prevImg.
#endif @param prevPts Vector of 2D points for which the flow needs to be found.
@param nextPts Output vector of 2D points containing the calculated new positions of input features in the second image.
@param status Output status vector. Each element of the vector is set to 1 if the
flow for the corresponding features has been found. Otherwise, it is set to 0.
@param err Optional output vector that contains error response for each point (inverse confidence).
*/
CV_WRAP virtual void calc(InputArray prevImg, InputArray nextImg,
InputArray prevPts, InputOutputArray nextPts,
OutputArray status,
OutputArray err = cv::noArray()) = 0;
};
/** @brief "Dual TV L1" Optical Flow Algorithm.
The class implements the "Dual TV L1" optical flow algorithm described in @cite Zach2007 and
@cite Javier2012 .
Here are important members of the class that control the algorithm, which you can set after
constructing the class instance:
- member double tau
Time step of the numerical scheme.
- member double lambda
Weight parameter for the data term, attachment parameter. This is the most relevant
parameter, which determines the smoothness of the output. The smaller this parameter is,
the smoother the solutions we obtain. It depends on the range of motions of the images, so
its value should be adapted to each image sequence.
- member double theta
Weight parameter for (u - v)\^2, tightness parameter. It serves as a link between the
attachment and the regularization terms. In theory, it should have a small value in order
to maintain both parts in correspondence. The method is stable for a large range of values
of this parameter.
- member int nscales
Number of scales used to create the pyramid of images.
- member int warps
Number of warpings per scale. Represents the number of times that I1(x+u0) and grad(
I1(x+u0) ) are computed per scale. This is a parameter that assures the stability of the
method. It also affects the running time, so it is a compromise between speed and
accuracy.
- member double epsilon
Stopping criterion threshold used in the numerical scheme, which is a trade-off between
precision and running time. A small value will yield more accurate solutions at the
expense of a slower convergence.
- member int iterations
Stopping criterion iterations number used in the numerical scheme.
C. Zach, T. Pock and H. Bischof, "A Duality Based Approach for Realtime TV-L1 Optical Flow".
Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flow Estimation".
*/
class CV_EXPORTS_W DualTVL1OpticalFlow : public DenseOpticalFlow
{
public:
//! @brief Time step of the numerical scheme
/** @see setTau */
CV_WRAP virtual double getTau() const = 0;
/** @copybrief getTau @see getTau */
CV_WRAP virtual void setTau(double val) = 0;
//! @brief Weight parameter for the data term, attachment parameter
/** @see setLambda */
CV_WRAP virtual double getLambda() const = 0;
/** @copybrief getLambda @see getLambda */
CV_WRAP virtual void setLambda(double val) = 0;
//! @brief Weight parameter for (u - v)^2, tightness parameter
/** @see setTheta */
CV_WRAP virtual double getTheta() const = 0;
/** @copybrief getTheta @see getTheta */
CV_WRAP virtual void setTheta(double val) = 0;
//! @brief coefficient for additional illumination variation term
/** @see setGamma */
CV_WRAP virtual double getGamma() const = 0;
/** @copybrief getGamma @see getGamma */
CV_WRAP virtual void setGamma(double val) = 0;
//! @brief Number of scales used to create the pyramid of images
/** @see setScalesNumber */
CV_WRAP virtual int getScalesNumber() const = 0;
/** @copybrief getScalesNumber @see getScalesNumber */
CV_WRAP virtual void setScalesNumber(int val) = 0;
//! @brief Number of warpings per scale
/** @see setWarpingsNumber */
CV_WRAP virtual int getWarpingsNumber() const = 0;
/** @copybrief getWarpingsNumber @see getWarpingsNumber */
CV_WRAP virtual void setWarpingsNumber(int val) = 0;
//! @brief Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time
/** @see setEpsilon */
CV_WRAP virtual double getEpsilon() const = 0;
/** @copybrief getEpsilon @see getEpsilon */
CV_WRAP virtual void setEpsilon(double val) = 0;
//! @brief Inner iterations (between outlier filtering) used in the numerical scheme
/** @see setInnerIterations */
CV_WRAP virtual int getInnerIterations() const = 0;
/** @copybrief getInnerIterations @see getInnerIterations */
CV_WRAP virtual void setInnerIterations(int val) = 0;
//! @brief Outer iterations (number of inner loops) used in the numerical scheme
/** @see setOuterIterations */
CV_WRAP virtual int getOuterIterations() const = 0;
/** @copybrief getOuterIterations @see getOuterIterations */
CV_WRAP virtual void setOuterIterations(int val) = 0;
//! @brief Use initial flow
/** @see setUseInitialFlow */
CV_WRAP virtual bool getUseInitialFlow() const = 0;
/** @copybrief getUseInitialFlow @see getUseInitialFlow */
CV_WRAP virtual void setUseInitialFlow(bool val) = 0;
//! @brief Step between scales (<1)
/** @see setScaleStep */
CV_WRAP virtual double getScaleStep() const = 0;
/** @copybrief getScaleStep @see getScaleStep */
CV_WRAP virtual void setScaleStep(double val) = 0;
//! @brief Median filter kernel size (1 = no filter) (3 or 5)
/** @see setMedianFiltering */
CV_WRAP virtual int getMedianFiltering() const = 0;
/** @copybrief getMedianFiltering @see getMedianFiltering */
CV_WRAP virtual void setMedianFiltering(int val) = 0;
/** @brief Creates instance of cv::DualTVL1OpticalFlow*/
CV_WRAP static Ptr<DualTVL1OpticalFlow> create(
double tau = 0.25,
double lambda = 0.15,
double theta = 0.3,
int nscales = 5,
int warps = 5,
double epsilon = 0.01,
int innnerIterations = 30,
int outerIterations = 10,
double scaleStep = 0.8,
double gamma = 0.0,
int medianFiltering = 5,
bool useInitialFlow = false);
};
/** @brief Creates an instance of cv::DualTVL1OpticalFlow with default parameters.

Equivalent to DualTVL1OpticalFlow::create() called with its default arguments.
@return Smart pointer to the newly created dense optical-flow estimator.
*/
CV_EXPORTS_W Ptr<DualTVL1OpticalFlow> createOptFlow_DualTVL1();
/** @brief Class computing a dense optical flow using the Gunnar Farneback's algorithm.
*/
class CV_EXPORTS_W FarnebackOpticalFlow : public DenseOpticalFlow
{
public:
    //! @brief Number of pyramid layers (see create(): default 5)
    /** @see setNumLevels */
    CV_WRAP virtual int getNumLevels() const = 0;
    /** @copybrief getNumLevels @see getNumLevels */
    CV_WRAP virtual void setNumLevels(int numLevels) = 0;
    //! @brief Scale factor between consecutive pyramid levels (see create(): default 0.5)
    /** @see setPyrScale */
    CV_WRAP virtual double getPyrScale() const = 0;
    /** @copybrief getPyrScale @see getPyrScale */
    CV_WRAP virtual void setPyrScale(double pyrScale) = 0;
    //! @brief Whether the fast pyramid construction path is enabled (see create(): default false)
    /** @see setFastPyramids */
    CV_WRAP virtual bool getFastPyramids() const = 0;
    /** @copybrief getFastPyramids @see getFastPyramids */
    CV_WRAP virtual void setFastPyramids(bool fastPyramids) = 0;
    //! @brief Averaging window size (see create(): default 13)
    /** @see setWinSize */
    CV_WRAP virtual int getWinSize() const = 0;
    /** @copybrief getWinSize @see getWinSize */
    CV_WRAP virtual void setWinSize(int winSize) = 0;
    //! @brief Number of iterations performed at each pyramid level (see create(): default 10)
    /** @see setNumIters */
    CV_WRAP virtual int getNumIters() const = 0;
    /** @copybrief getNumIters @see getNumIters */
    CV_WRAP virtual void setNumIters(int numIters) = 0;
    //! @brief Size of the pixel neighborhood used for the polynomial expansion (see create(): default 5)
    /** @see setPolyN */
    CV_WRAP virtual int getPolyN() const = 0;
    /** @copybrief getPolyN @see getPolyN */
    CV_WRAP virtual void setPolyN(int polyN) = 0;
    //! @brief Gaussian standard deviation used in the polynomial expansion (see create(): default 1.1)
    /** @see setPolySigma */
    CV_WRAP virtual double getPolySigma() const = 0;
    /** @copybrief getPolySigma @see getPolySigma */
    CV_WRAP virtual void setPolySigma(double polySigma) = 0;
    //! @brief Operation flags (see create(): default 0)
    /** @see setFlags */
    CV_WRAP virtual int getFlags() const = 0;
    /** @copybrief getFlags @see getFlags */
    CV_WRAP virtual void setFlags(int flags) = 0;
    /** @brief Creates an instance of cv::FarnebackOpticalFlow.

    Parameter defaults match the member documentation above.
    */
    CV_WRAP static Ptr<FarnebackOpticalFlow> create(
            int numLevels = 5,
            double pyrScale = 0.5,
            bool fastPyramids = false,
            int winSize = 13,
            int numIters = 10,
            int polyN = 5,
            double polySigma = 1.1,
            int flags = 0);
};
/** @brief Class used for calculating a sparse optical flow.
The class can calculate an optical flow for a sparse feature set using the
iterative Lucas-Kanade method with pyramids.
@sa calcOpticalFlowPyrLK
*/
class CV_EXPORTS_W SparsePyrLKOpticalFlow : public SparseOpticalFlow
{
public:
    //! @brief Size of the search window at each pyramid level (see create(): default 21x21)
    /** @see setWinSize */
    CV_WRAP virtual Size getWinSize() const = 0;
    /** @copybrief getWinSize @see getWinSize */
    CV_WRAP virtual void setWinSize(Size winSize) = 0;
    //! @brief Maximal (0-based) pyramid level number (see create(): default 3)
    /** @see setMaxLevel */
    CV_WRAP virtual int getMaxLevel() const = 0;
    /** @copybrief getMaxLevel @see getMaxLevel */
    CV_WRAP virtual void setMaxLevel(int maxLevel) = 0;
    //! @brief Termination criteria of the iterative search (see create(): default COUNT+EPS, 30, 0.01)
    /** @see setTermCriteria */
    CV_WRAP virtual TermCriteria getTermCriteria() const = 0;
    /** @copybrief getTermCriteria @see getTermCriteria */
    // NOTE(review): takes a non-const reference; kept as-is since this is the
    // published interface and callers/wrappers bind against it.
    CV_WRAP virtual void setTermCriteria(TermCriteria& crit) = 0;
    //! @brief Operation flags (see create(): default 0)
    /** @see setFlags */
    CV_WRAP virtual int getFlags() const = 0;
    /** @copybrief getFlags @see getFlags */
    CV_WRAP virtual void setFlags(int flags) = 0;
    //! @brief Threshold on the minimal eigenvalue used to filter out bad points (see create(): default 1e-4)
    /** @see setMinEigThreshold */
    CV_WRAP virtual double getMinEigThreshold() const = 0;
    /** @copybrief getMinEigThreshold @see getMinEigThreshold */
    CV_WRAP virtual void setMinEigThreshold(double minEigThreshold) = 0;
    /** @brief Creates an instance of cv::SparsePyrLKOpticalFlow.

    Parameter defaults match the member documentation above.
    */
    CV_WRAP static Ptr<SparsePyrLKOpticalFlow> create(
            Size winSize = Size(21, 21),
            int maxLevel = 3, TermCriteria crit =
            TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 0.01),
            int flags = 0,
            double minEigThreshold = 1e-4);
};
//! @} video_track
} // cv
#endif

View File

@ -7,11 +7,12 @@
// copy or use the software. // copy or use the software.
// //
// //
// License Agreement // License Agreement
// For Open Source Computer Vision Library // For Open Source Computer Vision Library
// //
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// //
// Redistribution and use in source and binary forms, with or without modification, // Redistribution and use in source and binary forms, with or without modification,
@ -40,19 +41,8 @@
// //
//M*/ //M*/
#ifndef __OPENCV_VIDEO_HPP__ #ifdef __OPENCV_BUILD
#define __OPENCV_VIDEO_HPP__ #error this is a compatibility header which should not be used inside the OpenCV library
#include "opencv2/video/tracking.hpp"
#include "opencv2/video/background_segm.hpp"
#ifdef __cplusplus
namespace cv
{
CV_EXPORTS bool initModule_video(void);
}
#endif #endif
#endif //__OPENCV_VIDEO_HPP__ #include "opencv2/video.hpp"

View File

@ -40,29 +40,37 @@
// //
//M*/ //M*/
#ifndef __OPENCV_VIDEOSTAB_DEBLURRING_HPP__ #ifndef OPENCV_VIDEOSTAB_DEBLURRING_HPP
#define __OPENCV_VIDEOSTAB_DEBLURRING_HPP__ #define OPENCV_VIDEOSTAB_DEBLURRING_HPP
#include <vector> #include <vector>
#include "opencv2/core/core.hpp" #include "opencv2/core.hpp"
namespace cv namespace cv
{ {
namespace videostab namespace videostab
{ {
//! @addtogroup videostab
//! @{
CV_EXPORTS float calcBlurriness(const Mat &frame); CV_EXPORTS float calcBlurriness(const Mat &frame);
class CV_EXPORTS DeblurerBase class CV_EXPORTS DeblurerBase
{ {
public: public:
DeblurerBase() : radius_(0), frames_(0), motions_(0) {} DeblurerBase() : radius_(0), frames_(0), motions_(0), blurrinessRates_(0) {}
virtual ~DeblurerBase() {} virtual ~DeblurerBase() {}
virtual void setRadius(int val) { radius_ = val; } virtual void setRadius(int val) { radius_ = val; }
virtual int radius() const { return radius_; } virtual int radius() const { return radius_; }
virtual void deblur(int idx, Mat &frame) = 0;
// data from stabilizer
virtual void setFrames(const std::vector<Mat> &val) { frames_ = &val; } virtual void setFrames(const std::vector<Mat> &val) { frames_ = &val; }
virtual const std::vector<Mat>& frames() const { return *frames_; } virtual const std::vector<Mat>& frames() const { return *frames_; }
@ -72,10 +80,6 @@ public:
virtual void setBlurrinessRates(const std::vector<float> &val) { blurrinessRates_ = &val; } virtual void setBlurrinessRates(const std::vector<float> &val) { blurrinessRates_ = &val; }
virtual const std::vector<float>& blurrinessRates() const { return *blurrinessRates_; } virtual const std::vector<float>& blurrinessRates() const { return *blurrinessRates_; }
virtual void update() {}
virtual void deblur(int idx, Mat &frame) = 0;
protected: protected:
int radius_; int radius_;
const std::vector<Mat> *frames_; const std::vector<Mat> *frames_;
@ -86,7 +90,7 @@ protected:
class CV_EXPORTS NullDeblurer : public DeblurerBase class CV_EXPORTS NullDeblurer : public DeblurerBase
{ {
public: public:
virtual void deblur(int /*idx*/, Mat &/*frame*/) {} virtual void deblur(int /*idx*/, Mat &/*frame*/) CV_OVERRIDE {}
}; };
class CV_EXPORTS WeightingDeblurer : public DeblurerBase class CV_EXPORTS WeightingDeblurer : public DeblurerBase
@ -97,13 +101,15 @@ public:
void setSensitivity(float val) { sensitivity_ = val; } void setSensitivity(float val) { sensitivity_ = val; }
float sensitivity() const { return sensitivity_; } float sensitivity() const { return sensitivity_; }
virtual void deblur(int idx, Mat &frame); virtual void deblur(int idx, Mat &frame) CV_OVERRIDE;
private: private:
float sensitivity_; float sensitivity_;
Mat_<float> bSum_, gSum_, rSum_, wSum_; Mat_<float> bSum_, gSum_, rSum_, wSum_;
}; };
//! @}
} // namespace videostab } // namespace videostab
} // namespace cv } // namespace cv

View File

@ -40,28 +40,44 @@
// //
//M*/ //M*/
#ifndef __OPENCV_VIDEOSTAB_FAST_MARCHING_HPP__ #ifndef OPENCV_VIDEOSTAB_FAST_MARCHING_HPP
#define __OPENCV_VIDEOSTAB_FAST_MARCHING_HPP__ #define OPENCV_VIDEOSTAB_FAST_MARCHING_HPP
#include <cmath> #include <cmath>
#include <queue> #include <queue>
#include <algorithm> #include <algorithm>
#include "opencv2/core/core.hpp" #include "opencv2/core.hpp"
namespace cv namespace cv
{ {
namespace videostab namespace videostab
{ {
// See http://iwi.eldoc.ub.rug.nl/FILES/root/2004/JGraphToolsTelea/2004JGraphToolsTelea.pdf //! @addtogroup videostab_marching
//! @{
/** @brief Describes the Fast Marching Method implementation.
See http://iwi.eldoc.ub.rug.nl/FILES/root/2004/JGraphToolsTelea/2004JGraphToolsTelea.pdf
*/
class CV_EXPORTS FastMarchingMethod class CV_EXPORTS FastMarchingMethod
{ {
public: public:
FastMarchingMethod() : inf_(1e6f) {} FastMarchingMethod() : inf_(1e6f), size_(0) {}
/** @brief Template method that runs the Fast Marching Method.
@param mask Image mask. 0 value indicates that the pixel value must be inpainted, 255 indicates
that the pixel value is known, other values aren't acceptable.
@param inpaint Inpainting functor that overloads void operator ()(int x, int y).
@return Inpainting functor.
*/
template <typename Inpaint> template <typename Inpaint>
Inpaint run(const Mat &mask, Inpaint inpaint); Inpaint run(const Mat &mask, Inpaint inpaint);
/**
@return Distance map that's created during working of the method.
*/
Mat distanceMap() const { return dist_; } Mat distanceMap() const { return dist_; }
private: private:
@ -95,6 +111,8 @@ private:
int size_; // narrow band size int size_; // narrow band size
}; };
//! @}
} // namespace videostab } // namespace videostab
} // namespace cv } // namespace cv

View File

@ -40,8 +40,8 @@
// //
//M*/ //M*/
#ifndef __OPENCV_VIDEOSTAB_FAST_MARCHING_INL_HPP__ #ifndef OPENCV_VIDEOSTAB_FAST_MARCHING_INL_HPP
#define __OPENCV_VIDEOSTAB_FAST_MARCHING_INL_HPP__ #define OPENCV_VIDEOSTAB_FAST_MARCHING_INL_HPP
#include "opencv2/videostab/fast_marching.hpp" #include "opencv2/videostab/fast_marching.hpp"
@ -53,7 +53,6 @@ namespace videostab
template <typename Inpaint> template <typename Inpaint>
Inpaint FastMarchingMethod::run(const cv::Mat &mask, Inpaint inpaint) Inpaint FastMarchingMethod::run(const cv::Mat &mask, Inpaint inpaint)
{ {
using namespace std;
using namespace cv; using namespace cv;
CV_Assert(mask.type() == CV_8U); CV_Assert(mask.type() == CV_8U);
@ -129,8 +128,8 @@ Inpaint FastMarchingMethod::run(const cv::Mat &mask, Inpaint inpaint)
if (xn >= 0 && xn < flag_.cols && yn >= 0 && yn < flag_.rows && flag_(yn,xn) != KNOWN) if (xn >= 0 && xn < flag_.cols && yn >= 0 && yn < flag_.rows && flag_(yn,xn) != KNOWN)
{ {
dist_(yn,xn) = min(min(solve(xn-1, yn, xn, yn-1), solve(xn+1, yn, xn, yn-1)), dist_(yn,xn) = std::min(std::min(solve(xn-1, yn, xn, yn-1), solve(xn+1, yn, xn, yn-1)),
min(solve(xn-1, yn, xn, yn+1), solve(xn+1, yn, xn, yn+1))); std::min(solve(xn-1, yn, xn, yn+1), solve(xn+1, yn, xn, yn+1)));
if (flag_(yn,xn) == INSIDE) if (flag_(yn,xn) == INSIDE)
{ {

View File

@ -40,19 +40,20 @@
// //
//M*/ //M*/
#ifndef __OPENCV_VIDEOSTAB_FRAME_SOURCE_HPP__ #ifndef OPENCV_VIDEOSTAB_FRAME_SOURCE_HPP
#define __OPENCV_VIDEOSTAB_FRAME_SOURCE_HPP__ #define OPENCV_VIDEOSTAB_FRAME_SOURCE_HPP
#include <vector> #include <vector>
#include <string> #include "opencv2/core.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
namespace cv namespace cv
{ {
namespace videostab namespace videostab
{ {
//! @addtogroup videostab
//! @{
class CV_EXPORTS IFrameSource class CV_EXPORTS IFrameSource
{ {
public: public:
@ -64,27 +65,29 @@ public:
class CV_EXPORTS NullFrameSource : public IFrameSource class CV_EXPORTS NullFrameSource : public IFrameSource
{ {
public: public:
virtual void reset() {} virtual void reset() CV_OVERRIDE {}
virtual Mat nextFrame() { return Mat(); } virtual Mat nextFrame() CV_OVERRIDE { return Mat(); }
}; };
class CV_EXPORTS VideoFileSource : public IFrameSource class CV_EXPORTS VideoFileSource : public IFrameSource
{ {
public: public:
VideoFileSource(const std::string &path, bool volatileFrame = false); VideoFileSource(const String &path, bool volatileFrame = false);
virtual void reset(); virtual void reset() CV_OVERRIDE;
virtual Mat nextFrame(); virtual Mat nextFrame() CV_OVERRIDE;
int frameCount() { return static_cast<int>(reader_.get(CV_CAP_PROP_FRAME_COUNT)); } int width();
double fps() { return reader_.get(CV_CAP_PROP_FPS); } int height();
int count();
double fps();
private: private:
std::string path_; Ptr<IFrameSource> impl;
bool volatileFrame_;
VideoCapture reader_;
}; };
//! @}
} // namespace videostab } // namespace videostab
} // namespace cv } // namespace cv

View File

@ -40,101 +40,260 @@
// //
//M*/ //M*/
#ifndef __OPENCV_VIDEOSTAB_GLOBAL_MOTION_HPP__ #ifndef OPENCV_VIDEOSTAB_GLOBAL_MOTION_HPP
#define __OPENCV_VIDEOSTAB_GLOBAL_MOTION_HPP__ #define OPENCV_VIDEOSTAB_GLOBAL_MOTION_HPP
#include <vector> #include <vector>
#include "opencv2/core/core.hpp" #include <fstream>
#include "opencv2/features2d/features2d.hpp" #include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/opencv_modules.hpp"
#include "opencv2/videostab/optical_flow.hpp" #include "opencv2/videostab/optical_flow.hpp"
#include "opencv2/videostab/motion_core.hpp"
#include "opencv2/videostab/outlier_rejection.hpp"
#ifdef HAVE_OPENCV_CUDAIMGPROC
# include "opencv2/cudaimgproc.hpp"
#endif
namespace cv namespace cv
{ {
namespace videostab namespace videostab
{ {
enum MotionModel //! @addtogroup videostab_motion
{ //! @{
TRANSLATION = 0,
TRANSLATION_AND_SCALE = 1,
LINEAR_SIMILARITY = 2,
AFFINE = 3
};
/** @brief Estimates best global motion between two 2D point clouds in the least-squares sense.
@note Works in-place and changes input point arrays.
@param points0 Source set of 2D points (32F).
@param points1 Destination set of 2D points (32F).
@param model Motion model (up to MM_AFFINE).
@param rmse Final root-mean-square error.
@return 3x3 2D transformation matrix (32F).
*/
CV_EXPORTS Mat estimateGlobalMotionLeastSquares( CV_EXPORTS Mat estimateGlobalMotionLeastSquares(
const std::vector<Point2f> &points0, const std::vector<Point2f> &points1, InputOutputArray points0, InputOutputArray points1, int model = MM_AFFINE,
int model = AFFINE, float *rmse = 0); float *rmse = 0);
struct CV_EXPORTS RansacParams /** @brief Estimates best global motion between two 2D point clouds robustly (using RANSAC method).
{
int size; // subset size
float thresh; // max error to classify as inlier
float eps; // max outliers ratio
float prob; // probability of success
RansacParams(int _size, float _thresh, float _eps, float _prob) @param points0 Source set of 2D points (32F).
: size(_size), thresh(_thresh), eps(_eps), prob(_prob) {} @param points1 Destination set of 2D points (32F).
@param model Motion model. See cv::videostab::MotionModel.
static RansacParams translationMotionStd() { return RansacParams(2, 0.5f, 0.5f, 0.99f); } @param params RANSAC method parameters. See videostab::RansacParams.
static RansacParams translationAndScale2dMotionStd() { return RansacParams(3, 0.5f, 0.5f, 0.99f); } @param rmse Final root-mean-square error.
static RansacParams linearSimilarityMotionStd() { return RansacParams(4, 0.5f, 0.5f, 0.99f); } @param ninliers Final number of inliers.
static RansacParams affine2dMotionStd() { return RansacParams(6, 0.5f, 0.5f, 0.99f); } */
}; CV_EXPORTS Mat estimateGlobalMotionRansac(
InputArray points0, InputArray points1, int model = MM_AFFINE,
CV_EXPORTS Mat estimateGlobalMotionRobust( const RansacParams &params = RansacParams::default2dMotion(MM_AFFINE),
const std::vector<Point2f> &points0, const std::vector<Point2f> &points1,
int model = AFFINE, const RansacParams &params = RansacParams::affine2dMotionStd(),
float *rmse = 0, int *ninliers = 0); float *rmse = 0, int *ninliers = 0);
class CV_EXPORTS IGlobalMotionEstimator /** @brief Base class for all global motion estimation methods.
*/
class CV_EXPORTS MotionEstimatorBase
{ {
public: public:
virtual ~IGlobalMotionEstimator() {} virtual ~MotionEstimatorBase() {}
virtual Mat estimate(const Mat &frame0, const Mat &frame1) = 0;
/** @brief Sets motion model.
@param val Motion model. See cv::videostab::MotionModel.
*/
virtual void setMotionModel(MotionModel val) { motionModel_ = val; }
/**
@return Motion model. See cv::videostab::MotionModel.
*/
virtual MotionModel motionModel() const { return motionModel_; }
/** @brief Estimates global motion between two 2D point clouds.
@param points0 Source set of 2D points (32F).
@param points1 Destination set of 2D points (32F).
@param ok Indicates whether motion was estimated successfully.
@return 3x3 2D transformation matrix (32F).
*/
virtual Mat estimate(InputArray points0, InputArray points1, bool *ok = 0) = 0;
protected:
MotionEstimatorBase(MotionModel model) { setMotionModel(model); }
private:
MotionModel motionModel_;
}; };
class CV_EXPORTS PyrLkRobustMotionEstimator : public IGlobalMotionEstimator /** @brief Describes a robust RANSAC-based global 2D motion estimation method which minimizes L2 error.
*/
class CV_EXPORTS MotionEstimatorRansacL2 : public MotionEstimatorBase
{ {
public: public:
PyrLkRobustMotionEstimator(); MotionEstimatorRansacL2(MotionModel model = MM_AFFINE);
void setDetector(Ptr<FeatureDetector> val) { detector_ = val; }
Ptr<FeatureDetector> detector() const { return detector_; }
void setOptFlowEstimator(Ptr<ISparseOptFlowEstimator> val) { optFlowEstimator_ = val; }
Ptr<ISparseOptFlowEstimator> optFlowEstimator() const { return optFlowEstimator_; }
void setMotionModel(MotionModel val) { motionModel_ = val; }
MotionModel motionModel() const { return motionModel_; }
void setRansacParams(const RansacParams &val) { ransacParams_ = val; } void setRansacParams(const RansacParams &val) { ransacParams_ = val; }
RansacParams ransacParams() const { return ransacParams_; } RansacParams ransacParams() const { return ransacParams_; }
void setMaxRmse(float val) { maxRmse_ = val; }
float maxRmse() const { return maxRmse_; }
void setMinInlierRatio(float val) { minInlierRatio_ = val; } void setMinInlierRatio(float val) { minInlierRatio_ = val; }
float minInlierRatio() const { return minInlierRatio_; } float minInlierRatio() const { return minInlierRatio_; }
virtual Mat estimate(const Mat &frame0, const Mat &frame1); virtual Mat estimate(InputArray points0, InputArray points1, bool *ok = 0) CV_OVERRIDE;
private: private:
RansacParams ransacParams_;
float minInlierRatio_;
};
/** @brief Describes a global 2D motion estimation method which minimizes L1 error.
@note To be able to use this method you must build OpenCV with CLP library support. :
*/
class CV_EXPORTS MotionEstimatorL1 : public MotionEstimatorBase
{
public:
MotionEstimatorL1(MotionModel model = MM_AFFINE);
virtual Mat estimate(InputArray points0, InputArray points1, bool *ok = 0) CV_OVERRIDE;
private:
std::vector<double> obj_, collb_, colub_;
std::vector<double> elems_, rowlb_, rowub_;
std::vector<int> rows_, cols_;
void set(int row, int col, double coef)
{
rows_.push_back(row);
cols_.push_back(col);
elems_.push_back(coef);
}
};
/** @brief Base class for global 2D motion estimation methods which take frames as input.
*/
class CV_EXPORTS ImageMotionEstimatorBase
{
public:
virtual ~ImageMotionEstimatorBase() {}
virtual void setMotionModel(MotionModel val) { motionModel_ = val; }
virtual MotionModel motionModel() const { return motionModel_; }
virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0) = 0;
protected:
ImageMotionEstimatorBase(MotionModel model) { setMotionModel(model); }
private:
MotionModel motionModel_;
};
class CV_EXPORTS FromFileMotionReader : public ImageMotionEstimatorBase
{
public:
FromFileMotionReader(const String &path);
virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0) CV_OVERRIDE;
private:
std::ifstream file_;
};
class CV_EXPORTS ToFileMotionWriter : public ImageMotionEstimatorBase
{
public:
ToFileMotionWriter(const String &path, Ptr<ImageMotionEstimatorBase> estimator);
virtual void setMotionModel(MotionModel val) CV_OVERRIDE { motionEstimator_->setMotionModel(val); }
virtual MotionModel motionModel() const CV_OVERRIDE { return motionEstimator_->motionModel(); }
virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0) CV_OVERRIDE;
private:
std::ofstream file_;
Ptr<ImageMotionEstimatorBase> motionEstimator_;
};
/** @brief Describes a global 2D motion estimation method which uses keypoints detection and optical flow for
matching.
*/
class CV_EXPORTS KeypointBasedMotionEstimator : public ImageMotionEstimatorBase
{
public:
KeypointBasedMotionEstimator(Ptr<MotionEstimatorBase> estimator);
virtual void setMotionModel(MotionModel val) CV_OVERRIDE { motionEstimator_->setMotionModel(val); }
virtual MotionModel motionModel() const CV_OVERRIDE { return motionEstimator_->motionModel(); }
void setDetector(Ptr<FeatureDetector> val) { detector_ = val; }
Ptr<FeatureDetector> detector() const { return detector_; }
void setOpticalFlowEstimator(Ptr<ISparseOptFlowEstimator> val) { optFlowEstimator_ = val; }
Ptr<ISparseOptFlowEstimator> opticalFlowEstimator() const { return optFlowEstimator_; }
void setOutlierRejector(Ptr<IOutlierRejector> val) { outlierRejector_ = val; }
Ptr<IOutlierRejector> outlierRejector() const { return outlierRejector_; }
virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0) CV_OVERRIDE;
Mat estimate(InputArray frame0, InputArray frame1, bool *ok = 0);
private:
Ptr<MotionEstimatorBase> motionEstimator_;
Ptr<FeatureDetector> detector_; Ptr<FeatureDetector> detector_;
Ptr<ISparseOptFlowEstimator> optFlowEstimator_; Ptr<ISparseOptFlowEstimator> optFlowEstimator_;
MotionModel motionModel_; Ptr<IOutlierRejector> outlierRejector_;
RansacParams ransacParams_;
std::vector<uchar> status_; std::vector<uchar> status_;
std::vector<KeyPoint> keypointsPrev_; std::vector<KeyPoint> keypointsPrev_;
std::vector<Point2f> pointsPrev_, points_; std::vector<Point2f> pointsPrev_, points_;
std::vector<Point2f> pointsPrevGood_, pointsGood_; std::vector<Point2f> pointsPrevGood_, pointsGood_;
float maxRmse_;
float minInlierRatio_;
}; };
CV_EXPORTS Mat getMotion(int from, int to, const Mat *motions, int size); #if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW)
class CV_EXPORTS KeypointBasedMotionEstimatorGpu : public ImageMotionEstimatorBase
{
public:
KeypointBasedMotionEstimatorGpu(Ptr<MotionEstimatorBase> estimator);
virtual void setMotionModel(MotionModel val) CV_OVERRIDE { motionEstimator_->setMotionModel(val); }
virtual MotionModel motionModel() const CV_OVERRIDE { return motionEstimator_->motionModel(); }
void setOutlierRejector(Ptr<IOutlierRejector> val) { outlierRejector_ = val; }
Ptr<IOutlierRejector> outlierRejector() const { return outlierRejector_; }
virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0) CV_OVERRIDE;
Mat estimate(const cuda::GpuMat &frame0, const cuda::GpuMat &frame1, bool *ok = 0);
private:
Ptr<MotionEstimatorBase> motionEstimator_;
Ptr<cuda::CornersDetector> detector_;
SparsePyrLkOptFlowEstimatorGpu optFlowEstimator_;
Ptr<IOutlierRejector> outlierRejector_;
cuda::GpuMat frame0_, grayFrame0_, frame1_;
cuda::GpuMat pointsPrev_, points_;
cuda::GpuMat status_;
Mat hostPointsPrev_, hostPoints_;
std::vector<Point2f> hostPointsPrevTmp_, hostPointsTmp_;
std::vector<uchar> rejectionStatus_;
};
#endif // defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW)
/** @brief Computes motion between two frames assuming that all the intermediate motions are known.
@param from Source frame index.
@param to Destination frame index.
@param motions Pair-wise motions. motions[i] denotes motion from the frame i to the frame i+1
@return Motion from the Source frame to the Destination frame.
*/
CV_EXPORTS Mat getMotion(int from, int to, const std::vector<Mat> &motions); CV_EXPORTS Mat getMotion(int from, int to, const std::vector<Mat> &motions);
//! @}
} // namespace videostab } // namespace videostab
} // namespace cv } // namespace cv

View File

@ -40,25 +40,29 @@
// //
//M*/ //M*/
#ifndef __OPENCV_VIDEOSTAB_INPAINTINT_HPP__ #ifndef OPENCV_VIDEOSTAB_INPAINTINT_HPP
#define __OPENCV_VIDEOSTAB_INPAINTINT_HPP__ #define OPENCV_VIDEOSTAB_INPAINTINT_HPP
#include <vector> #include <vector>
#include "opencv2/core/core.hpp" #include "opencv2/core.hpp"
#include "opencv2/videostab/optical_flow.hpp" #include "opencv2/videostab/optical_flow.hpp"
#include "opencv2/videostab/fast_marching.hpp" #include "opencv2/videostab/fast_marching.hpp"
#include "opencv2/photo/photo.hpp" #include "opencv2/videostab/global_motion.hpp"
#include "opencv2/photo.hpp"
namespace cv namespace cv
{ {
namespace videostab namespace videostab
{ {
//! @addtogroup videostab
//! @{
class CV_EXPORTS InpainterBase class CV_EXPORTS InpainterBase
{ {
public: public:
InpainterBase() InpainterBase()
: radius_(0), frames_(0), motions_(0), : radius_(0), motionModel_(MM_UNKNOWN), frames_(0), motions_(0),
stabilizedFrames_(0), stabilizationMotions_(0) {} stabilizedFrames_(0), stabilizationMotions_(0) {}
virtual ~InpainterBase() {} virtual ~InpainterBase() {}
@ -66,6 +70,14 @@ public:
virtual void setRadius(int val) { radius_ = val; } virtual void setRadius(int val) { radius_ = val; }
virtual int radius() const { return radius_; } virtual int radius() const { return radius_; }
virtual void setMotionModel(MotionModel val) { motionModel_ = val; }
virtual MotionModel motionModel() const { return motionModel_; }
virtual void inpaint(int idx, Mat &frame, Mat &mask) = 0;
// data from stabilizer
virtual void setFrames(const std::vector<Mat> &val) { frames_ = &val; } virtual void setFrames(const std::vector<Mat> &val) { frames_ = &val; }
virtual const std::vector<Mat>& frames() const { return *frames_; } virtual const std::vector<Mat>& frames() const { return *frames_; }
@ -78,12 +90,9 @@ public:
virtual void setStabilizationMotions(const std::vector<Mat> &val) { stabilizationMotions_ = &val; } virtual void setStabilizationMotions(const std::vector<Mat> &val) { stabilizationMotions_ = &val; }
virtual const std::vector<Mat>& stabilizationMotions() const { return *stabilizationMotions_; } virtual const std::vector<Mat>& stabilizationMotions() const { return *stabilizationMotions_; }
virtual void update() {}
virtual void inpaint(int idx, Mat &frame, Mat &mask) = 0;
protected: protected:
int radius_; int radius_;
MotionModel motionModel_;
const std::vector<Mat> *frames_; const std::vector<Mat> *frames_;
const std::vector<Mat> *motions_; const std::vector<Mat> *motions_;
const std::vector<Mat> *stabilizedFrames_; const std::vector<Mat> *stabilizedFrames_;
@ -93,7 +102,7 @@ protected:
class CV_EXPORTS NullInpainter : public InpainterBase class CV_EXPORTS NullInpainter : public InpainterBase
{ {
public: public:
virtual void inpaint(int /*idx*/, Mat &/*frame*/, Mat &/*mask*/) {} virtual void inpaint(int /*idx*/, Mat &/*frame*/, Mat &/*mask*/) CV_OVERRIDE {}
}; };
class CV_EXPORTS InpaintingPipeline : public InpainterBase class CV_EXPORTS InpaintingPipeline : public InpainterBase
@ -102,15 +111,14 @@ public:
void pushBack(Ptr<InpainterBase> inpainter) { inpainters_.push_back(inpainter); } void pushBack(Ptr<InpainterBase> inpainter) { inpainters_.push_back(inpainter); }
bool empty() const { return inpainters_.empty(); } bool empty() const { return inpainters_.empty(); }
virtual void setRadius(int val); virtual void setRadius(int val) CV_OVERRIDE;
virtual void setFrames(const std::vector<Mat> &val); virtual void setMotionModel(MotionModel val) CV_OVERRIDE;
virtual void setMotions(const std::vector<Mat> &val); virtual void setFrames(const std::vector<Mat> &val) CV_OVERRIDE;
virtual void setStabilizedFrames(const std::vector<Mat> &val); virtual void setMotions(const std::vector<Mat> &val) CV_OVERRIDE;
virtual void setStabilizationMotions(const std::vector<Mat> &val); virtual void setStabilizedFrames(const std::vector<Mat> &val) CV_OVERRIDE;
virtual void setStabilizationMotions(const std::vector<Mat> &val) CV_OVERRIDE;
virtual void update(); virtual void inpaint(int idx, Mat &frame, Mat &mask) CV_OVERRIDE;
virtual void inpaint(int idx, Mat &frame, Mat &mask);
private: private:
std::vector<Ptr<InpainterBase> > inpainters_; std::vector<Ptr<InpainterBase> > inpainters_;
@ -124,7 +132,7 @@ public:
void setStdevThresh(float val) { stdevThresh_ = val; } void setStdevThresh(float val) { stdevThresh_ = val; }
float stdevThresh() const { return stdevThresh_; } float stdevThresh() const { return stdevThresh_; }
virtual void inpaint(int idx, Mat &frame, Mat &mask); virtual void inpaint(int idx, Mat &frame, Mat &mask) CV_OVERRIDE;
private: private:
float stdevThresh_; float stdevThresh_;
@ -147,7 +155,7 @@ public:
void setBorderMode(int val) { borderMode_ = val; } void setBorderMode(int val) { borderMode_ = val; }
int borderMode() const { return borderMode_; } int borderMode() const { return borderMode_; }
virtual void inpaint(int idx, Mat &frame, Mat &mask); virtual void inpaint(int idx, Mat &frame, Mat &mask) CV_OVERRIDE;
private: private:
FastMarchingMethod fmm_; FastMarchingMethod fmm_;
@ -166,7 +174,7 @@ private:
class CV_EXPORTS ColorAverageInpainter : public InpainterBase class CV_EXPORTS ColorAverageInpainter : public InpainterBase
{ {
public: public:
virtual void inpaint(int idx, Mat &frame, Mat &mask); virtual void inpaint(int idx, Mat &frame, Mat &mask) CV_OVERRIDE;
private: private:
FastMarchingMethod fmm_; FastMarchingMethod fmm_;
@ -175,10 +183,9 @@ private:
class CV_EXPORTS ColorInpainter : public InpainterBase class CV_EXPORTS ColorInpainter : public InpainterBase
{ {
public: public:
ColorInpainter(int method = INPAINT_TELEA, double _radius = 2.) ColorInpainter(int method = INPAINT_TELEA, double radius = 2.);
: method_(method), radius_(_radius) {}
virtual void inpaint(int idx, Mat &frame, Mat &mask); virtual void inpaint(int idx, Mat &frame, Mat &mask) CV_OVERRIDE;
private: private:
int method_; int method_;
@ -186,6 +193,9 @@ private:
Mat invMask_; Mat invMask_;
}; };
inline ColorInpainter::ColorInpainter(int _method, double _radius)
: method_(_method), radius_(_radius) {}
CV_EXPORTS void calcFlowMask( CV_EXPORTS void calcFlowMask(
const Mat &flowX, const Mat &flowY, const Mat &errors, float maxError, const Mat &flowX, const Mat &flowY, const Mat &errors, float maxError,
const Mat &mask0, const Mat &mask1, Mat &flowMask); const Mat &mask0, const Mat &mask1, Mat &flowMask);
@ -194,6 +204,8 @@ CV_EXPORTS void completeFrameAccordingToFlow(
const Mat &flowMask, const Mat &flowX, const Mat &flowY, const Mat &frame1, const Mat &mask1, const Mat &flowMask, const Mat &flowX, const Mat &flowY, const Mat &frame1, const Mat &mask1,
float distThresh, Mat& frame0, Mat &mask0); float distThresh, Mat& frame0, Mat &mask0);
//! @}
} // namespace videostab } // namespace videostab
} // namespace cv } // namespace cv

View File

@ -40,16 +40,19 @@
// //
//M*/ //M*/
#ifndef __OPENCV_VIDEOSTAB_LOG_HPP__ #ifndef OPENCV_VIDEOSTAB_LOG_HPP
#define __OPENCV_VIDEOSTAB_LOG_HPP__ #define OPENCV_VIDEOSTAB_LOG_HPP
#include "opencv2/core/core.hpp" #include "opencv2/core.hpp"
namespace cv namespace cv
{ {
namespace videostab namespace videostab
{ {
//! @addtogroup videostab
//! @{
class CV_EXPORTS ILog class CV_EXPORTS ILog
{ {
public: public:
@ -60,15 +63,17 @@ public:
class CV_EXPORTS NullLog : public ILog class CV_EXPORTS NullLog : public ILog
{ {
public: public:
virtual void print(const char * /*format*/, ...) {} virtual void print(const char * /*format*/, ...) CV_OVERRIDE {}
}; };
class CV_EXPORTS LogToStdout : public ILog class CV_EXPORTS LogToStdout : public ILog
{ {
public: public:
virtual void print(const char *format, ...); virtual void print(const char *format, ...) CV_OVERRIDE;
}; };
//! @}
} // namespace videostab } // namespace videostab
} // namespace cv } // namespace cv

View File

@ -40,66 +40,134 @@
// //
//M*/ //M*/
#ifndef __OPENCV_VIDEOSTAB_MOTION_STABILIZING_HPP__ #ifndef OPENCV_VIDEOSTAB_MOTION_STABILIZING_HPP
#define __OPENCV_VIDEOSTAB_MOTION_STABILIZING_HPP__ #define OPENCV_VIDEOSTAB_MOTION_STABILIZING_HPP
#include <vector> #include <vector>
#include "opencv2/core/core.hpp" #include <utility>
#include "opencv2/core.hpp"
#include "opencv2/videostab/global_motion.hpp"
namespace cv namespace cv
{ {
namespace videostab namespace videostab
{ {
//! @addtogroup videostab_motion
//! @{
class CV_EXPORTS IMotionStabilizer class CV_EXPORTS IMotionStabilizer
{ {
public: public:
virtual void stabilize(const Mat *motions, int size, Mat *stabilizationMotions) const = 0;
#ifdef OPENCV_CAN_BREAK_BINARY_COMPATIBILITY
virtual ~IMotionStabilizer() {} virtual ~IMotionStabilizer() {}
#endif
//! assumes that [0, size-1) is in or equals to [range.first, range.second)
virtual void stabilize(
int size, const std::vector<Mat> &motions, std::pair<int,int> range,
Mat *stabilizationMotions) = 0;
};
class CV_EXPORTS MotionStabilizationPipeline : public IMotionStabilizer
{
public:
void pushBack(Ptr<IMotionStabilizer> stabilizer) { stabilizers_.push_back(stabilizer); }
bool empty() const { return stabilizers_.empty(); }
virtual void stabilize(
int size, const std::vector<Mat> &motions, std::pair<int,int> range,
Mat *stabilizationMotions) CV_OVERRIDE;
private:
std::vector<Ptr<IMotionStabilizer> > stabilizers_;
}; };
class CV_EXPORTS MotionFilterBase : public IMotionStabilizer class CV_EXPORTS MotionFilterBase : public IMotionStabilizer
{ {
public: public:
MotionFilterBase() : radius_(0) {}
virtual ~MotionFilterBase() {} virtual ~MotionFilterBase() {}
virtual void setRadius(int val) { radius_ = val; } virtual Mat stabilize(
virtual int radius() const { return radius_; } int idx, const std::vector<Mat> &motions, std::pair<int,int> range) = 0;
virtual void update() {} virtual void stabilize(
int size, const std::vector<Mat> &motions, std::pair<int,int> range,
virtual Mat stabilize(int index, const Mat *motions, int size) const = 0; Mat *stabilizationMotions) CV_OVERRIDE;
virtual void stabilize(const Mat *motions, int size, Mat *stabilizationMotions) const;
protected:
int radius_;
}; };
class CV_EXPORTS GaussianMotionFilter : public MotionFilterBase class CV_EXPORTS GaussianMotionFilter : public MotionFilterBase
{ {
public: public:
GaussianMotionFilter() : stdev_(-1.f) {} GaussianMotionFilter(int radius = 15, float stdev = -1.f);
void setStdev(float val) { stdev_ = val; } void setParams(int radius, float stdev = -1.f);
int radius() const { return radius_; }
float stdev() const { return stdev_; } float stdev() const { return stdev_; }
virtual void update(); virtual Mat stabilize(
int idx, const std::vector<Mat> &motions, std::pair<int,int> range) CV_OVERRIDE;
virtual Mat stabilize(int index, const Mat *motions, int size) const;
private: private:
int radius_;
float stdev_; float stdev_;
std::vector<float> weight_; std::vector<float> weight_;
}; };
inline GaussianMotionFilter::GaussianMotionFilter(int _radius, float _stdev) { setParams(_radius, _stdev); }
class CV_EXPORTS LpMotionStabilizer : public IMotionStabilizer
{
public:
LpMotionStabilizer(MotionModel model = MM_SIMILARITY);
void setMotionModel(MotionModel val) { model_ = val; }
MotionModel motionModel() const { return model_; }
void setFrameSize(Size val) { frameSize_ = val; }
Size frameSize() const { return frameSize_; }
void setTrimRatio(float val) { trimRatio_ = val; }
float trimRatio() const { return trimRatio_; }
void setWeight1(float val) { w1_ = val; }
float weight1() const { return w1_; }
void setWeight2(float val) { w2_ = val; }
float weight2() const { return w2_; }
void setWeight3(float val) { w3_ = val; }
float weight3() const { return w3_; }
void setWeight4(float val) { w4_ = val; }
float weight4() const { return w4_; }
virtual void stabilize(
int size, const std::vector<Mat> &motions, std::pair<int,int> range,
Mat *stabilizationMotions) CV_OVERRIDE;
private:
MotionModel model_;
Size frameSize_;
float trimRatio_;
float w1_, w2_, w3_, w4_;
std::vector<double> obj_, collb_, colub_;
std::vector<int> rows_, cols_;
std::vector<double> elems_, rowlb_, rowub_;
void set(int row, int col, double coef)
{
rows_.push_back(row);
cols_.push_back(col);
elems_.push_back(coef);
}
};
CV_EXPORTS Mat ensureInclusionConstraint(const Mat &M, Size size, float trimRatio); CV_EXPORTS Mat ensureInclusionConstraint(const Mat &M, Size size, float trimRatio);
CV_EXPORTS float estimateOptimalTrimRatio(const Mat &M, Size size); CV_EXPORTS float estimateOptimalTrimRatio(const Mat &M, Size size);
//! @}
} // namespace videostab } // namespace videostab
} // namespace } // namespace

View File

@ -40,14 +40,14 @@
// //
//M*/ //M*/
#ifndef __OPENCV_VIDEOSTAB_OPTICAL_FLOW_HPP__ #ifndef OPENCV_VIDEOSTAB_OPTICAL_FLOW_HPP
#define __OPENCV_VIDEOSTAB_OPTICAL_FLOW_HPP__ #define OPENCV_VIDEOSTAB_OPTICAL_FLOW_HPP
#include "opencv2/core/core.hpp" #include "opencv2/core.hpp"
#include "opencv2/opencv_modules.hpp" #include "opencv2/opencv_modules.hpp"
#if defined(HAVE_OPENCV_GPU) && !defined(ANDROID) #ifdef HAVE_OPENCV_CUDAOPTFLOW
# include "opencv2/gpu/gpu.hpp" #include "opencv2/cudaoptflow.hpp"
#endif #endif
namespace cv namespace cv
@ -55,6 +55,9 @@ namespace cv
namespace videostab namespace videostab
{ {
//! @addtogroup videostab
//! @{
class CV_EXPORTS ISparseOptFlowEstimator class CV_EXPORTS ISparseOptFlowEstimator
{ {
public: public:
@ -78,11 +81,12 @@ class CV_EXPORTS PyrLkOptFlowEstimatorBase
public: public:
PyrLkOptFlowEstimatorBase() { setWinSize(Size(21, 21)); setMaxLevel(3); } PyrLkOptFlowEstimatorBase() { setWinSize(Size(21, 21)); setMaxLevel(3); }
void setWinSize(Size val) { winSize_ = val; } virtual void setWinSize(Size val) { winSize_ = val; }
Size winSize() const { return winSize_; } virtual Size winSize() const { return winSize_; }
void setMaxLevel(int val) { maxLevel_ = val; } virtual void setMaxLevel(int val) { maxLevel_ = val; }
int maxLevel() const { return maxLevel_; } virtual int maxLevel() const { return maxLevel_; }
virtual ~PyrLkOptFlowEstimatorBase() {}
protected: protected:
Size winSize_; Size winSize_;
@ -95,10 +99,32 @@ class CV_EXPORTS SparsePyrLkOptFlowEstimator
public: public:
virtual void run( virtual void run(
InputArray frame0, InputArray frame1, InputArray points0, InputOutputArray points1, InputArray frame0, InputArray frame1, InputArray points0, InputOutputArray points1,
OutputArray status, OutputArray errors); OutputArray status, OutputArray errors) CV_OVERRIDE;
};
#ifdef HAVE_OPENCV_CUDAOPTFLOW
class CV_EXPORTS SparsePyrLkOptFlowEstimatorGpu
: public PyrLkOptFlowEstimatorBase, public ISparseOptFlowEstimator
{
public:
SparsePyrLkOptFlowEstimatorGpu();
virtual void run(
InputArray frame0, InputArray frame1, InputArray points0, InputOutputArray points1,
OutputArray status, OutputArray errors) CV_OVERRIDE;
void run(const cuda::GpuMat &frame0, const cuda::GpuMat &frame1, const cuda::GpuMat &points0, cuda::GpuMat &points1,
cuda::GpuMat &status, cuda::GpuMat &errors);
void run(const cuda::GpuMat &frame0, const cuda::GpuMat &frame1, const cuda::GpuMat &points0, cuda::GpuMat &points1,
cuda::GpuMat &status);
private:
Ptr<cuda::SparsePyrLKOpticalFlow> optFlowEstimator_;
cuda::GpuMat frame0_, frame1_, points0_, points1_, status_, errors_;
}; };
#if defined(HAVE_OPENCV_GPU) && !defined(ANDROID)
class CV_EXPORTS DensePyrLkOptFlowEstimatorGpu class CV_EXPORTS DensePyrLkOptFlowEstimatorGpu
: public PyrLkOptFlowEstimatorBase, public IDenseOptFlowEstimator : public PyrLkOptFlowEstimatorBase, public IDenseOptFlowEstimator
{ {
@ -107,13 +133,17 @@ public:
virtual void run( virtual void run(
InputArray frame0, InputArray frame1, InputOutputArray flowX, InputOutputArray flowY, InputArray frame0, InputArray frame1, InputOutputArray flowX, InputOutputArray flowY,
OutputArray errors); OutputArray errors) CV_OVERRIDE;
private: private:
gpu::PyrLKOpticalFlow optFlowEstimator_; Ptr<cuda::DensePyrLKOpticalFlow> optFlowEstimator_;
gpu::GpuMat frame0_, frame1_, flowX_, flowY_, errors_; cuda::GpuMat frame0_, frame1_, flowX_, flowY_, errors_;
}; };
#endif #endif
//! @}
} // namespace videostab } // namespace videostab
} // namespace cv } // namespace cv

View File

@ -40,30 +40,35 @@
// //
//M*/ //M*/
#ifndef __OPENCV_VIDEOSTAB_STABILIZER_HPP__ #ifndef OPENCV_VIDEOSTAB_STABILIZER_HPP
#define __OPENCV_VIDEOSTAB_STABILIZER_HPP__ #define OPENCV_VIDEOSTAB_STABILIZER_HPP
#include <vector> #include <vector>
#include "opencv2/core/core.hpp" #include <ctime>
#include "opencv2/imgproc/imgproc.hpp" #include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/videostab/global_motion.hpp" #include "opencv2/videostab/global_motion.hpp"
#include "opencv2/videostab/motion_stabilizing.hpp" #include "opencv2/videostab/motion_stabilizing.hpp"
#include "opencv2/videostab/frame_source.hpp" #include "opencv2/videostab/frame_source.hpp"
#include "opencv2/videostab/log.hpp" #include "opencv2/videostab/log.hpp"
#include "opencv2/videostab/inpainting.hpp" #include "opencv2/videostab/inpainting.hpp"
#include "opencv2/videostab/deblurring.hpp" #include "opencv2/videostab/deblurring.hpp"
#include "opencv2/videostab/wobble_suppression.hpp"
namespace cv namespace cv
{ {
namespace videostab namespace videostab
{ {
//! @addtogroup videostab
//! @{
class CV_EXPORTS StabilizerBase class CV_EXPORTS StabilizerBase
{ {
public: public:
virtual ~StabilizerBase() {} virtual ~StabilizerBase() {}
void setLog(Ptr<ILog> _log) { log_ = _log; } void setLog(Ptr<ILog> ilog) { log_ = ilog; }
Ptr<ILog> log() const { return log_; } Ptr<ILog> log() const { return log_; }
void setRadius(int val) { radius_ = val; } void setRadius(int val) { radius_ = val; }
@ -72,8 +77,8 @@ public:
void setFrameSource(Ptr<IFrameSource> val) { frameSource_ = val; } void setFrameSource(Ptr<IFrameSource> val) { frameSource_ = val; }
Ptr<IFrameSource> frameSource() const { return frameSource_; } Ptr<IFrameSource> frameSource() const { return frameSource_; }
void setMotionEstimator(Ptr<IGlobalMotionEstimator> val) { motionEstimator_ = val; } void setMotionEstimator(Ptr<ImageMotionEstimatorBase> val) { motionEstimator_ = val; }
Ptr<IGlobalMotionEstimator> motionEstimator() const { return motionEstimator_; } Ptr<ImageMotionEstimatorBase> motionEstimator() const { return motionEstimator_; }
void setDeblurer(Ptr<DeblurerBase> val) { deblurer_ = val; } void setDeblurer(Ptr<DeblurerBase> val) { deblurer_ = val; }
Ptr<DeblurerBase> deblurrer() const { return deblurer_; } Ptr<DeblurerBase> deblurrer() const { return deblurer_; }
@ -93,18 +98,19 @@ public:
protected: protected:
StabilizerBase(); StabilizerBase();
void setUp(int cacheSize, const Mat &frame); void reset();
Mat nextStabilizedFrame(); Mat nextStabilizedFrame();
bool doOneIteration(); bool doOneIteration();
void stabilizeFrame(const Mat &stabilizationMotion); virtual void setUp(const Mat &firstFrame);
virtual Mat estimateMotion() = 0;
virtual void setUp(Mat &firstFrame) = 0; virtual Mat estimateStabilizationMotion() = 0;
virtual void stabilizeFrame() = 0; void stabilizeFrame();
virtual void estimateMotion() = 0; virtual Mat postProcessFrame(const Mat &frame);
void logProcessingTime();
Ptr<ILog> log_; Ptr<ILog> log_;
Ptr<IFrameSource> frameSource_; Ptr<IFrameSource> frameSource_;
Ptr<IGlobalMotionEstimator> motionEstimator_; Ptr<ImageMotionEstimatorBase> motionEstimator_;
Ptr<DeblurerBase> deblurer_; Ptr<DeblurerBase> deblurer_;
Ptr<InpainterBase> inpainter_; Ptr<InpainterBase> inpainter_;
int radius_; int radius_;
@ -120,12 +126,14 @@ protected:
Mat preProcessedFrame_; Mat preProcessedFrame_;
bool doInpainting_; bool doInpainting_;
Mat inpaintingMask_; Mat inpaintingMask_;
Mat finalFrame_;
std::vector<Mat> frames_; std::vector<Mat> frames_;
std::vector<Mat> motions_; // motions_[i] is the motion from i-th to i+1-th frame std::vector<Mat> motions_; // motions_[i] is the motion from i-th to i+1-th frame
std::vector<float> blurrinessRates_; std::vector<float> blurrinessRates_;
std::vector<Mat> stabilizedFrames_; std::vector<Mat> stabilizedFrames_;
std::vector<Mat> stabilizedMasks_; std::vector<Mat> stabilizedMasks_;
std::vector<Mat> stabilizationMotions_; std::vector<Mat> stabilizationMotions_;
clock_t processingStartTime_;
}; };
class CV_EXPORTS OnePassStabilizer : public StabilizerBase, public IFrameSource class CV_EXPORTS OnePassStabilizer : public StabilizerBase, public IFrameSource
@ -136,15 +144,14 @@ public:
void setMotionFilter(Ptr<MotionFilterBase> val) { motionFilter_ = val; } void setMotionFilter(Ptr<MotionFilterBase> val) { motionFilter_ = val; }
Ptr<MotionFilterBase> motionFilter() const { return motionFilter_; } Ptr<MotionFilterBase> motionFilter() const { return motionFilter_; }
virtual void reset() { resetImpl(); } virtual void reset() CV_OVERRIDE;
virtual Mat nextFrame() { return nextStabilizedFrame(); } virtual Mat nextFrame() CV_OVERRIDE { return nextStabilizedFrame(); }
private: protected:
void resetImpl(); virtual void setUp(const Mat &firstFrame) CV_OVERRIDE;
virtual Mat estimateMotion() CV_OVERRIDE;
virtual void setUp(Mat &firstFrame); virtual Mat estimateStabilizationMotion() CV_OVERRIDE;
virtual void estimateMotion(); virtual Mat postProcessFrame(const Mat &frame) CV_OVERRIDE;
virtual void stabilizeFrame();
Ptr<MotionFilterBase> motionFilter_; Ptr<MotionFilterBase> motionFilter_;
}; };
@ -157,30 +164,36 @@ public:
void setMotionStabilizer(Ptr<IMotionStabilizer> val) { motionStabilizer_ = val; } void setMotionStabilizer(Ptr<IMotionStabilizer> val) { motionStabilizer_ = val; }
Ptr<IMotionStabilizer> motionStabilizer() const { return motionStabilizer_; } Ptr<IMotionStabilizer> motionStabilizer() const { return motionStabilizer_; }
void setWobbleSuppressor(Ptr<WobbleSuppressorBase> val) { wobbleSuppressor_ = val; }
Ptr<WobbleSuppressorBase> wobbleSuppressor() const { return wobbleSuppressor_; }
void setEstimateTrimRatio(bool val) { mustEstTrimRatio_ = val; } void setEstimateTrimRatio(bool val) { mustEstTrimRatio_ = val; }
bool mustEstimateTrimaRatio() const { return mustEstTrimRatio_; } bool mustEstimateTrimaRatio() const { return mustEstTrimRatio_; }
virtual void reset() { resetImpl(); } virtual void reset() CV_OVERRIDE;
virtual Mat nextFrame(); virtual Mat nextFrame() CV_OVERRIDE;
// available after pre-pass, before it's empty protected:
std::vector<Mat> motions() const;
private:
void resetImpl();
void runPrePassIfNecessary(); void runPrePassIfNecessary();
virtual void setUp(Mat &firstFrame); virtual void setUp(const Mat &firstFrame) CV_OVERRIDE;
virtual void estimateMotion() { /* do nothing as motion was estimation in pre-pass */ } virtual Mat estimateMotion() CV_OVERRIDE;
virtual void stabilizeFrame(); virtual Mat estimateStabilizationMotion() CV_OVERRIDE;
virtual Mat postProcessFrame(const Mat &frame) CV_OVERRIDE;
Ptr<IMotionStabilizer> motionStabilizer_; Ptr<IMotionStabilizer> motionStabilizer_;
Ptr<WobbleSuppressorBase> wobbleSuppressor_;
bool mustEstTrimRatio_; bool mustEstTrimRatio_;
int frameCount_; int frameCount_;
bool isPrePassDone_; bool isPrePassDone_;
bool doWobbleSuppression_;
std::vector<Mat> motions2_;
Mat suppressedFrame_;
}; };
//! @}
} // namespace videostab } // namespace videostab
} // namespace cv } // namespace cv

Some files were not shown because too many files have changed in this diff Show More